1/*
2 * Copyright (C) 2011-2021 Apple Inc. All rights reserved.
3 *
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
6 * are met:
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
12 *
13 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
14 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
17 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
18 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
19 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
20 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
21 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
24 */
25
26#include "config.h"
27#include "DFGSpeculativeJIT.h"
28
29#if ENABLE(DFG_JIT)
30
31#include "BinarySwitch.h"
32#include "DFGAbstractInterpreterInlines.h"
33#include "DFGArrayifySlowPathGenerator.h"
34#include "DFGCallArrayAllocatorSlowPathGenerator.h"
35#include "DFGCallCreateDirectArgumentsSlowPathGenerator.h"
36#include "DFGCapabilities.h"
37#include "DFGClobberize.h"
38#include "DFGMayExit.h"
39#include "DFGOSRExitFuzz.h"
40#include "DFGSaneStringGetByValSlowPathGenerator.h"
41#include "DFGSlowPathGenerator.h"
42#include "DFGSnippetParams.h"
43#include "DirectArguments.h"
44#include "DisallowMacroScratchRegisterUsage.h"
45#include "JITBitAndGenerator.h"
46#include "JITBitOrGenerator.h"
47#include "JITBitXorGenerator.h"
48#include "JITDivGenerator.h"
49#include "JITLeftShiftGenerator.h"
50#include "JITRightShiftGenerator.h"
51#include "JITSizeStatistics.h"
52#include "JSArrayIterator.h"
53#include "JSAsyncFunction.h"
54#include "JSAsyncGeneratorFunction.h"
55#include "JSBoundFunction.h"
56#include "JSCInlines.h"
57#include "JSGeneratorFunction.h"
58#include "JSImmutableButterfly.h"
59#include "JSLexicalEnvironment.h"
60#include "JSMapIterator.h"
61#include "JSPropertyNameEnumerator.h"
62#include "JSSetIterator.h"
63#include "LLIntThunks.h"
64#include "RegExpObject.h"
65#include "ScopedArguments.h"
66#include "TypeProfilerLog.h"
67#include "WeakMapImpl.h"
68#include <wtf/BitVector.h>
69#include <wtf/Box.h>
70#include <wtf/MathExtras.h>
71
72namespace JSC { namespace DFG {
73
74DEFINE_ALLOCATOR_WITH_HEAP_IDENTIFIER(SpeculativeJIT);
75
// Constructs the speculative (DFG) JIT for one compilation. Wires this tier's
// per-compilation state — the abstract interpreter, the variable event stream,
// and the minified graph used for OSR exit — to the shared JITCompiler.
SpeculativeJIT::SpeculativeJIT(JITCompiler& jit)
    : m_jit(jit)
    , m_graph(m_jit.graph())
    , m_currentNode(nullptr)
    , m_lastGeneratedNode(LastNodeType)
    , m_indexInBlock(0)
    , m_generationInfo(m_jit.graph().frameRegisterCount())
    , m_compileOkay(true)
    , m_state(m_jit.graph())
    , m_interpreter(m_jit.graph(), m_state)
    , m_stream(&jit.jitCode()->variableEventStream)
    , m_minifiedGraph(&jit.jitCode()->minifiedDFG)
{
}
90
91SpeculativeJIT::~SpeculativeJIT()
92{
93}
94
// Emits an inline fast-path allocation of a JSObject (or JSArray) together with
// its butterfly storage. On success resultGPR holds the new object and
// storageGPR its butterfly (null when no indexed/out-of-line storage is
// needed). The first numElements indexed slots are left for the caller to
// fill; the tail up to vectorLength is cleared here so the GC never sees
// garbage. Any inline-allocation failure falls back to operationNewRawObject
// via a custom slow path generator.
void SpeculativeJIT::emitAllocateRawObject(GPRReg resultGPR, RegisteredStructure structure, GPRReg storageGPR, unsigned numElements, unsigned vectorLength)
{
    ASSERT(!isCopyOnWrite(structure->indexingMode()));
    IndexingType indexingType = structure->indexingType();
    bool hasIndexingHeader = hasIndexedProperties(indexingType);

    unsigned inlineCapacity = structure->inlineCapacity();
    unsigned outOfLineCapacity = structure->outOfLineCapacity();

    GPRTemporary scratch(this);
    GPRTemporary scratch2(this);
    GPRReg scratchGPR = scratch.gpr();
    GPRReg scratch2GPR = scratch2.gpr();

    ASSERT(vectorLength >= numElements);
    // Round up to the vector length the allocator would grant anyway.
    vectorLength = Butterfly::optimalContiguousVectorLength(structure.get(), vectorLength);

    JITCompiler::JumpList slowCases;

    // Total butterfly size: indexed storage plus its header, plus out-of-line
    // property storage.
    size_t size = 0;
    if (hasIndexingHeader)
        size += vectorLength * sizeof(JSValue) + sizeof(IndexingHeader);
    size += outOfLineCapacity * sizeof(JSValue);

    m_jit.move(TrustedImmPtr(nullptr), storageGPR);

    VM& vm = this->vm();
    if (size) {
        if (Allocator allocator = vm.jsValueGigacageAuxiliarySpace.allocatorForNonVirtual(size, AllocatorForMode::AllocatorIfExists)) {
            m_jit.emitAllocate(storageGPR, JITAllocator::constant(allocator), scratchGPR, scratch2GPR, slowCases);

            // The butterfly pointer points just past the out-of-line property
            // storage and indexing header, so advance past both.
            m_jit.addPtr(
                TrustedImm32(outOfLineCapacity * sizeof(JSValue) + sizeof(IndexingHeader)),
                storageGPR);

            if (hasIndexingHeader)
                m_jit.store32(TrustedImm32(vectorLength), MacroAssembler::Address(storageGPR, Butterfly::offsetOfVectorLength()));
        } else
            slowCases.append(m_jit.jump());
    }

    // Pick the cell allocator matching the object kind; if none exists yet,
    // take the slow path unconditionally.
    Allocator allocator;
    if (structure->type() == JSType::ArrayType)
        allocator = allocatorForNonVirtualConcurrently<JSArray>(vm, JSArray::allocationSize(inlineCapacity), AllocatorForMode::AllocatorIfExists);
    else
        allocator = allocatorForNonVirtualConcurrently<JSFinalObject>(vm, JSFinalObject::allocationSize(inlineCapacity), AllocatorForMode::AllocatorIfExists);
    if (allocator) {
        emitAllocateJSObject(resultGPR, JITAllocator::constant(allocator), scratchGPR, TrustedImmPtr(structure), storageGPR, scratch2GPR, slowCases);
        m_jit.emitInitializeInlineStorage(resultGPR, structure->inlineCapacity());
    } else
        slowCases.append(m_jit.jump());

    // I want a slow path that also loads out the storage pointer, and that's
    // what this custom CallArrayAllocatorSlowPathGenerator gives me. It's a lot
    // of work for a very small piece of functionality. :-/
    addSlowPathGenerator(makeUnique<CallArrayAllocatorSlowPathGenerator>(
        slowCases, this, operationNewRawObject, resultGPR, storageGPR,
        structure, vectorLength));

    // Clear the uninitialized tail of the vector: PNaN hole for double
    // storage, the empty JSValue otherwise.
    if (numElements < vectorLength) {
#if USE(JSVALUE64)
        if (hasDouble(structure->indexingType()))
            m_jit.move(TrustedImm64(bitwise_cast<int64_t>(PNaN)), scratchGPR);
        else
            m_jit.move(TrustedImm64(JSValue::encode(JSValue())), scratchGPR);
        for (unsigned i = numElements; i < vectorLength; ++i)
            m_jit.store64(scratchGPR, MacroAssembler::Address(storageGPR, sizeof(double) * i));
#else
        EncodedValueDescriptor value;
        if (hasDouble(structure->indexingType()))
            value.asInt64 = JSValue::encode(JSValue(JSValue::EncodeAsDouble, PNaN));
        else
            value.asInt64 = JSValue::encode(JSValue());
        for (unsigned i = numElements; i < vectorLength; ++i) {
            m_jit.store32(TrustedImm32(value.asBits.tag), MacroAssembler::Address(storageGPR, sizeof(double) * i + OBJECT_OFFSETOF(JSValue, u.asBits.tag)));
            m_jit.store32(TrustedImm32(value.asBits.payload), MacroAssembler::Address(storageGPR, sizeof(double) * i + OBJECT_OFFSETOF(JSValue, u.asBits.payload)));
        }
#endif
    }

    if (hasIndexingHeader)
        m_jit.store32(TrustedImm32(numElements), MacroAssembler::Address(storageGPR, Butterfly::offsetOfPublicLength()));

    m_jit.emitInitializeOutOfLineStorage(storageGPR, structure->outOfLineCapacity());

    // Fence so a concurrent GC never observes the object before it is fully
    // initialized.
    m_jit.mutatorFence(vm);
}
182
183void SpeculativeJIT::emitGetLength(InlineCallFrame* inlineCallFrame, GPRReg lengthGPR, bool includeThis)
184{
185 if (inlineCallFrame && !inlineCallFrame->isVarargs())
186 m_jit.move(TrustedImm32(inlineCallFrame->argumentCountIncludingThis - !includeThis), lengthGPR);
187 else {
188 VirtualRegister argumentCountRegister = m_jit.argumentCount(inlineCallFrame);
189 m_jit.load32(JITCompiler::payloadFor(argumentCountRegister), lengthGPR);
190 if (!includeThis)
191 m_jit.sub32(TrustedImm32(1), lengthGPR);
192 }
193}
194
195void SpeculativeJIT::emitGetLength(CodeOrigin origin, GPRReg lengthGPR, bool includeThis)
196{
197 emitGetLength(origin.inlineCallFrame(), lengthGPR, includeThis);
198}
199
200void SpeculativeJIT::emitGetCallee(CodeOrigin origin, GPRReg calleeGPR)
201{
202 auto* inlineCallFrame = origin.inlineCallFrame();
203 if (inlineCallFrame) {
204 if (inlineCallFrame->isClosureCall) {
205 m_jit.loadPtr(
206 JITCompiler::addressFor(inlineCallFrame->calleeRecovery.virtualRegister()),
207 calleeGPR);
208 } else {
209 m_jit.move(
210 TrustedImmPtr::weakPointer(m_jit.graph(), inlineCallFrame->calleeRecovery.constant().asCell()),
211 calleeGPR);
212 }
213 } else
214 m_jit.loadPtr(JITCompiler::addressFor(CallFrameSlot::callee), calleeGPR);
215}
216
217void SpeculativeJIT::emitGetArgumentStart(CodeOrigin origin, GPRReg startGPR)
218{
219 m_jit.addPtr(
220 TrustedImm32(
221 JITCompiler::argumentsStart(origin).offset() * static_cast<int>(sizeof(Register))),
222 GPRInfo::callFrameRegister, startGPR);
223}
224
// Emits the OSR-exit fuzzing probe: bumps the global fuzz-check counter and,
// when the configured trigger fires, returns a jump that callers fold into
// their exit jump list to force an artificial OSR exit. Returns an unset jump
// when fuzzing is disabled for this code block.
MacroAssembler::Jump SpeculativeJIT::emitOSRExitFuzzCheck()
{
    if (!Options::useOSRExitFuzz()
        || !canUseOSRExitFuzzing(m_jit.graph().baselineCodeBlockFor(m_origin.semantic))
        || !doOSRExitFuzzing())
        return MacroAssembler::Jump();

    MacroAssembler::Jump result;

    // regT0 is borrowed as scratch; it is saved/restored around the check so
    // the surrounding register state is undisturbed.
    m_jit.pushToSave(GPRInfo::regT0);
    m_jit.load32(&g_numberOfOSRExitFuzzChecks, GPRInfo::regT0);
    m_jit.add32(TrustedImm32(1), GPRInfo::regT0);
    m_jit.store32(GPRInfo::regT0, &g_numberOfOSRExitFuzzChecks);
    unsigned atOrAfter = Options::fireOSRExitFuzzAtOrAfter();
    unsigned at = Options::fireOSRExitFuzzAt();
    if (at || atOrAfter) {
        unsigned threshold;
        MacroAssembler::RelationalCondition condition;
        if (atOrAfter) {
            // Fire on every check at or after the threshold.
            threshold = atOrAfter;
            condition = MacroAssembler::Below;
        } else {
            // Fire exactly on the threshold-th check.
            threshold = at;
            condition = MacroAssembler::NotEqual;
        }
        // The "ok" path falls through; the firing path restores regT0 first,
        // then jumps (that jump becomes the forced exit).
        MacroAssembler::Jump ok = m_jit.branch32(
            condition, GPRInfo::regT0, MacroAssembler::TrustedImm32(threshold));
        m_jit.popToRestore(GPRInfo::regT0);
        result = m_jit.jump();
        ok.link(&m_jit);
    }
    m_jit.popToRestore(GPRInfo::regT0);

    return result;
}
260
// Registers an OSR exit taken when jumpToFail fires: records exit info (with
// any fuzz-induced jump folded in) and appends the matching OSRExit record.
// No-op once compilation has been abandoned.
void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node, MacroAssembler::Jump jumpToFail)
{
    if (!m_compileOkay)
        return;
    JITCompiler::Jump fuzzJump = emitOSRExitFuzzCheck();
    if (fuzzJump.isSet()) {
        JITCompiler::JumpList jumpsToFail;
        jumpsToFail.append(fuzzJump);
        jumpsToFail.append(jumpToFail);
        m_jit.appendExitInfo(jumpsToFail);
    } else
        m_jit.appendExitInfo(jumpToFail);
    // The exit record is tied to the current variable-event stream position so
    // the OSR exit compiler can reconstruct the bytecode state.
    m_jit.appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(m_currentNode, node), this, m_stream->size()));
}
275
// JumpList variant of the above: all jumps in jumpsToFail (plus any
// fuzz-induced jump) route to the same OSR exit.
void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node, const MacroAssembler::JumpList& jumpsToFail)
{
    if (!m_compileOkay)
        return;
    JITCompiler::Jump fuzzJump = emitOSRExitFuzzCheck();
    if (fuzzJump.isSet()) {
        JITCompiler::JumpList myJumpsToFail;
        myJumpsToFail.append(jumpsToFail);
        myJumpsToFail.append(fuzzJump);
        m_jit.appendExitInfo(myJumpsToFail);
    } else
        m_jit.appendExitInfo(jumpsToFail);
    m_jit.appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(m_currentNode, node), this, m_stream->size()));
}
290
// Registers an OSR exit whose failure jumps are not yet known; returns a
// placeholder (indexing m_jit.m_osrExit) that the caller links jumps into
// later. Returns an empty placeholder if compilation was abandoned.
OSRExitJumpPlaceholder SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node)
{
    if (!m_compileOkay)
        return OSRExitJumpPlaceholder();
    unsigned index = m_jit.m_osrExit.size();
    m_jit.appendExitInfo();
    m_jit.appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(m_currentNode, node), this, m_stream->size()));
    return OSRExitJumpPlaceholder(index);
}
300
301OSRExitJumpPlaceholder SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse)
302{
303 return speculationCheck(kind, jsValueSource, nodeUse.node());
304}
305
306void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse, MacroAssembler::Jump jumpToFail)
307{
308 speculationCheck(kind, jsValueSource, nodeUse.node(), jumpToFail);
309}
310
311void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse, const MacroAssembler::JumpList& jumpsToFail)
312{
313 speculationCheck(kind, jsValueSource, nodeUse.node(), jumpsToFail);
314}
315
// Registers an OSR exit with an attached SpeculationRecovery, which undoes a
// speculative in-place mutation (e.g. an overflowing add) before exiting.
void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node, MacroAssembler::Jump jumpToFail, const SpeculationRecovery& recovery)
{
    if (!m_compileOkay)
        return;
    unsigned recoveryIndex = m_jit.appendSpeculationRecovery(recovery);
    m_jit.appendExitInfo(jumpToFail);
    m_jit.appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(m_currentNode, node), this, m_stream->size(), recoveryIndex));
}
324
325void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse, MacroAssembler::Jump jumpToFail, const SpeculationRecovery& recovery)
326{
327 speculationCheck(kind, jsValueSource, nodeUse.node(), jumpToFail, recovery);
328}
329
// Emits an invalidation point for the node: an OSR exit with no inline jumps
// whose replacement-source label marks where a jump can later be patched in
// (via watchpoint fire) to invalidate this code.
void SpeculativeJIT::emitInvalidationPoint(Node* node)
{
    if (!m_compileOkay)
        return;
    OSRExitCompilationInfo& info = m_jit.appendExitInfo(JITCompiler::JumpList());
    m_jit.appendOSRExit(OSRExit(
        UncountableInvalidation, JSValueSource(), MethodOfGettingAValueProfile(),
        this, m_stream->size()));
    // The label recorded here is the patch target used when the code is
    // invalidated later.
    info.m_replacementSource = m_jit.watchpointLabel();
    RELEASE_ASSERT(info.m_replacementSource.isSet());
    noResult(node);
}
342
343void SpeculativeJIT::unreachable(Node* node)
344{
345 m_compileOkay = false;
346 m_jit.abortWithReason(DFGUnreachableNode, node->op());
347}
348
// Emits an unconditional OSR exit (the speculation has failed completely for
// this path) and abandons code generation for the rest of the block.
void SpeculativeJIT::terminateSpeculativeExecution(ExitKind kind, JSValueRegs jsValueRegs, Node* node)
{
    if (!m_compileOkay)
        return;
    // The check must be registered before clearing m_compileOkay, since
    // speculationCheck() early-returns when compilation is not okay.
    speculationCheck(kind, jsValueRegs, node, m_jit.jump());
    m_compileOkay = false;
    if (verboseCompilationEnabled())
        dataLog("Bailing compilation.\n");
}
358
359void SpeculativeJIT::terminateSpeculativeExecution(ExitKind kind, JSValueRegs jsValueRegs, Edge nodeUse)
360{
361 terminateSpeculativeExecution(kind, jsValueRegs, nodeUse.node());
362}
363
// Registers a type check: narrows the abstract interpreter's proven type for
// the edge to typesPassedThrough, then ties jumpToFail to an OSR exit. The
// filter must happen before the check so downstream code sees the refined type.
void SpeculativeJIT::typeCheck(JSValueSource source, Edge edge, SpeculatedType typesPassedThrough, MacroAssembler::Jump jumpToFail, ExitKind exitKind)
{
    ASSERT(needsTypeCheck(edge, typesPassedThrough));
    m_interpreter.filter(edge, typesPassedThrough);
    speculationCheck(exitKind, source, edge.node(), jumpToFail);
}
370
// JumpList variant of typeCheck: every jump in jumpListToFail exits if the
// value is outside typesPassedThrough.
void SpeculativeJIT::typeCheck(JSValueSource source, Edge edge, SpeculatedType typesPassedThrough, MacroAssembler::JumpList jumpListToFail, ExitKind exitKind)
{
    ASSERT(needsTypeCheck(edge, typesPassedThrough));
    m_interpreter.filter(edge, typesPassedThrough);
    speculationCheck(exitKind, source, edge.node(), jumpListToFail);
}
377
378RegisterSet SpeculativeJIT::usedRegisters()
379{
380 RegisterSet result;
381
382 for (unsigned i = GPRInfo::numberOfRegisters; i--;) {
383 GPRReg gpr = GPRInfo::toRegister(i);
384 if (m_gprs.isInUse(gpr))
385 result.set(gpr);
386 }
387 for (unsigned i = FPRInfo::numberOfRegisters; i--;) {
388 FPRReg fpr = FPRInfo::toRegister(i);
389 if (m_fprs.isInUse(fpr))
390 result.set(fpr);
391 }
392
393 // FIXME: This is overly conservative. We could subtract out those callee-saves that we
394 // actually saved.
395 // https://bugs.webkit.org/show_bug.cgi?id=185686
396 result.merge(RegisterSet::stubUnavailableRegisters());
397
398 return result;
399}
400
401void SpeculativeJIT::addSlowPathGenerator(std::unique_ptr<SlowPathGenerator> slowPathGenerator)
402{
403 m_slowPathGenerators.append(WTFMove(slowPathGenerator));
404}
405
406void SpeculativeJIT::addSlowPathGeneratorLambda(Function<void()>&& lambda)
407{
408 m_slowPathLambdas.append(SlowPathLambda{ WTFMove(lambda), m_currentNode, static_cast<unsigned>(m_stream->size()) });
409}
410
// Emits all deferred slow paths (both generator objects and lambdas) after the
// main code of the block, recording PC-to-CodeOrigin mappings and optional JIT
// size statistics around each one.
void SpeculativeJIT::runSlowPathGenerators(PCToCodeOriginMapBuilder& pcToCodeOriginMapBuilder)
{
    // Optionally brackets a slow path with a size-statistics marker when
    // Options::dumpDFGJITSizeStatistics() is enabled.
    auto markSlowPathIfNeeded = [&] (Node* node) {
        std::optional<JITSizeStatistics::Marker> sizeMarker;
        if (UNLIKELY(Options::dumpDFGJITSizeStatistics())) {
            String id = makeString("DFG_slow_", m_graph.opName(node->op()));
            sizeMarker = vm().jitSizeStatistics->markStart(id, m_jit);
        }
        return sizeMarker;
    };

    for (auto& slowPathGenerator : m_slowPathGenerators) {
        pcToCodeOriginMapBuilder.appendItem(m_jit.labelIgnoringWatchpoints(), slowPathGenerator->origin().semantic);
        auto sizeMarker = markSlowPathIfNeeded(slowPathGenerator->currentNode());

        slowPathGenerator->generate(this);

        if (UNLIKELY(sizeMarker))
            vm().jitSizeStatistics->markEnd(WTFMove(*sizeMarker), m_jit);
    }
    for (auto& slowPathLambda : m_slowPathLambdas) {
        Node* currentNode = slowPathLambda.currentNode;
        m_currentNode = currentNode;
        // Restore the stream index captured when the lambda was queued so OSR
        // exits taken inside it see the right variable-event state.
        m_outOfLineStreamIndex = slowPathLambda.streamIndex;
        pcToCodeOriginMapBuilder.appendItem(m_jit.labelIgnoringWatchpoints(), currentNode->origin.semantic);
        auto sizeMarker = markSlowPathIfNeeded(currentNode);

        slowPathLambda.generator();
        m_outOfLineStreamIndex = std::nullopt;
        if (UNLIKELY(sizeMarker))
            vm().jitSizeStatistics->markEnd(WTFMove(*sizeMarker), m_jit);
    }
}
444
445void SpeculativeJIT::clearGenerationInfo()
446{
447 for (unsigned i = 0; i < m_generationInfo.size(); ++i)
448 m_generationInfo[i] = GenerationInfo();
449 m_gprs = RegisterBank<GPRInfo>();
450 m_fprs = RegisterBank<FPRInfo>();
451}
452
// Computes, without emitting any code, how to silently spill the GPR `source`
// (which holds the value of virtual register `spillMe`) around a call, and how
// to refill it afterwards. The plan prefers rematerialization (re-loading a
// constant or re-reading an existing spill slot) over storing, so "silent"
// saves are as cheap as possible. On 32-bit, a JS value occupies a tag GPR and
// a payload GPR, so `source` may be either half.
SilentRegisterSavePlan SpeculativeJIT::silentSavePlanForGPR(VirtualRegister spillMe, GPRReg source)
{
    GenerationInfo& info = generationInfoFromVirtualRegister(spillMe);
    Node* node = info.node();
    DataFormat registerFormat = info.registerFormat();
    ASSERT(registerFormat != DataFormatNone);
    ASSERT(registerFormat != DataFormatDouble);

    SilentSpillAction spillAction;
    SilentFillAction fillAction;

    // Step 1: decide the spill action. If the value is already spilled (or is
    // a constant), nothing needs to be stored.
    if (!info.needsSpill())
        spillAction = DoNothingForSpill;
    else {
#if USE(JSVALUE64)
        ASSERT(info.gpr() == source);
        if (registerFormat == DataFormatInt32)
            spillAction = Store32Payload;
        else if (registerFormat == DataFormatCell || registerFormat == DataFormatStorage)
            spillAction = StorePtr;
        else if (registerFormat == DataFormatInt52 || registerFormat == DataFormatStrictInt52)
            spillAction = Store64;
        else {
            ASSERT(registerFormat & DataFormatJS);
            spillAction = Store64;
        }
#elif USE(JSVALUE32_64)
        if (registerFormat & DataFormatJS) {
            // On 32-bit a JS value spans two GPRs; store whichever half this is.
            ASSERT(info.tagGPR() == source || info.payloadGPR() == source);
            spillAction = source == info.tagGPR() ? Store32Tag : Store32Payload;
        } else {
            ASSERT(info.gpr() == source);
            spillAction = Store32Payload;
        }
#endif
    }

    // Step 2: decide the fill action, keyed on the register format. Constants
    // are rematerialized; everything else is reloaded from the spill slot,
    // with Int52 values shifted between strict and left-shifted forms as
    // needed.
    if (registerFormat == DataFormatInt32) {
        ASSERT(info.gpr() == source);
        ASSERT(isJSInt32(info.registerFormat()));
        if (node->hasConstant()) {
            ASSERT(node->isInt32Constant());
            fillAction = SetInt32Constant;
        } else
            fillAction = Load32Payload;
    } else if (registerFormat == DataFormatBoolean) {
#if USE(JSVALUE64)
        // 64-bit never keeps a bare boolean in a GPR under this format.
        RELEASE_ASSERT_NOT_REACHED();
#if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE)
        fillAction = DoNothingForFill;
#endif
#elif USE(JSVALUE32_64)
        ASSERT(info.gpr() == source);
        if (node->hasConstant()) {
            ASSERT(node->isBooleanConstant());
            fillAction = SetBooleanConstant;
        } else
            fillAction = Load32Payload;
#endif
    } else if (registerFormat == DataFormatCell) {
        ASSERT(info.gpr() == source);
        if (node->hasConstant()) {
            DFG_ASSERT(m_jit.graph(), m_currentNode, node->isCellConstant());
            node->asCell(); // To get the assertion.
            fillAction = SetCellConstant;
        } else {
#if USE(JSVALUE64)
            fillAction = LoadPtr;
#else
            fillAction = Load32Payload;
#endif
        }
    } else if (registerFormat == DataFormatStorage) {
        ASSERT(info.gpr() == source);
        fillAction = LoadPtr;
    } else if (registerFormat == DataFormatInt52) {
        if (node->hasConstant())
            fillAction = SetInt52Constant;
        else if (info.spillFormat() == DataFormatInt52)
            fillAction = Load64;
        else if (info.spillFormat() == DataFormatStrictInt52)
            fillAction = Load64ShiftInt52Left;
        else if (info.spillFormat() == DataFormatNone)
            fillAction = Load64;
        else {
            RELEASE_ASSERT_NOT_REACHED();
#if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE)
            fillAction = Load64; // Make GCC happy.
#endif
        }
    } else if (registerFormat == DataFormatStrictInt52) {
        if (node->hasConstant())
            fillAction = SetStrictInt52Constant;
        else if (info.spillFormat() == DataFormatInt52)
            fillAction = Load64ShiftInt52Right;
        else if (info.spillFormat() == DataFormatStrictInt52)
            fillAction = Load64;
        else if (info.spillFormat() == DataFormatNone)
            fillAction = Load64;
        else {
            RELEASE_ASSERT_NOT_REACHED();
#if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE)
            fillAction = Load64; // Make GCC happy.
#endif
        }
    } else {
        // Remaining formats are the boxed-JS-value family.
        ASSERT(registerFormat & DataFormatJS);
#if USE(JSVALUE64)
        ASSERT(info.gpr() == source);
        if (node->hasConstant()) {
            if (node->isCellConstant())
                fillAction = SetTrustedJSConstant;
            else
                fillAction = SetJSConstant;
        } else if (info.spillFormat() == DataFormatInt32) {
            // The spill slot holds a raw int32; reboxing is needed on fill.
            ASSERT(registerFormat == DataFormatJSInt32);
            fillAction = Load32PayloadBoxInt;
        } else
            fillAction = Load64;
#else
        ASSERT(info.tagGPR() == source || info.payloadGPR() == source);
        if (node->hasConstant())
            fillAction = info.tagGPR() == source ? SetJSConstantTag : SetJSConstantPayload;
        else if (info.payloadGPR() == source)
            fillAction = Load32Payload;
        else { // Fill the Tag
            switch (info.spillFormat()) {
            case DataFormatInt32:
                ASSERT(registerFormat == DataFormatJSInt32);
                fillAction = SetInt32Tag;
                break;
            case DataFormatCell:
                ASSERT(registerFormat == DataFormatJSCell);
                fillAction = SetCellTag;
                break;
            case DataFormatBoolean:
                ASSERT(registerFormat == DataFormatJSBoolean);
                fillAction = SetBooleanTag;
                break;
            default:
                fillAction = Load32Tag;
                break;
            }
        }
#endif
    }

    return SilentRegisterSavePlan(spillAction, fillAction, node, source);
}
602
// FPR counterpart of silentSavePlanForGPR: plans how to silently save/restore
// a double held in `source` for virtual register `spillMe`. Constants are
// rematerialized on fill; otherwise the double is stored to and reloaded from
// its spill slot.
SilentRegisterSavePlan SpeculativeJIT::silentSavePlanForFPR(VirtualRegister spillMe, FPRReg source)
{
    GenerationInfo& info = generationInfoFromVirtualRegister(spillMe);
    Node* node = info.node();
    ASSERT(info.registerFormat() == DataFormatDouble);

    SilentSpillAction spillAction;
    SilentFillAction fillAction;

    if (!info.needsSpill())
        spillAction = DoNothingForSpill;
    else {
        // A double needing a spill must be live only in this FPR.
        ASSERT(!node->hasConstant());
        ASSERT(info.spillFormat() == DataFormatNone);
        ASSERT(info.fpr() == source);
        spillAction = StoreDouble;
    }

#if USE(JSVALUE64)
    if (node->hasConstant()) {
        node->asNumber(); // To get the assertion.
        fillAction = SetDoubleConstant;
    } else {
        ASSERT(info.spillFormat() == DataFormatNone || info.spillFormat() == DataFormatDouble);
        fillAction = LoadDouble;
    }
#elif USE(JSVALUE32_64)
    ASSERT(info.registerFormat() == DataFormatDouble);
    if (node->hasConstant()) {
        node->asNumber(); // To get the assertion.
        fillAction = SetDoubleConstant;
    } else
        fillAction = LoadDouble;
#endif

    return SilentRegisterSavePlan(spillAction, fillAction, node, source);
}
640
// Emits the store (if any) prescribed by a silent-save plan, writing the
// register's value to the node's spill slot without changing any tracked
// register-allocation state.
void SpeculativeJIT::silentSpill(const SilentRegisterSavePlan& plan)
{
    switch (plan.spillAction()) {
    case DoNothingForSpill:
        break;
    case Store32Tag:
        m_jit.store32(plan.gpr(), JITCompiler::tagFor(plan.node()->virtualRegister()));
        break;
    case Store32Payload:
        m_jit.store32(plan.gpr(), JITCompiler::payloadFor(plan.node()->virtualRegister()));
        break;
    case StorePtr:
        m_jit.storePtr(plan.gpr(), JITCompiler::addressFor(plan.node()->virtualRegister()));
        break;
#if USE(JSVALUE64)
    case Store64:
        m_jit.store64(plan.gpr(), JITCompiler::addressFor(plan.node()->virtualRegister()));
        break;
#endif
    case StoreDouble:
        m_jit.storeDouble(plan.fpr(), JITCompiler::addressFor(plan.node()->virtualRegister()));
        break;
    default:
        RELEASE_ASSERT_NOT_REACHED();
    }
}
667
// Emits the refill prescribed by a silent-save plan: either rematerializes a
// constant into the register or reloads the value from its spill slot
// (re-boxing or shifting Int52 values as the plan dictates).
void SpeculativeJIT::silentFill(const SilentRegisterSavePlan& plan)
{
    switch (plan.fillAction()) {
    case DoNothingForFill:
        break;
    case SetInt32Constant:
        m_jit.move(Imm32(plan.node()->asInt32()), plan.gpr());
        break;
#if USE(JSVALUE64)
    case SetInt52Constant:
        // Int52 values live left-shifted by int52ShiftAmount in registers.
        m_jit.move(Imm64(plan.node()->asAnyInt() << JSValue::int52ShiftAmount), plan.gpr());
        break;
    case SetStrictInt52Constant:
        m_jit.move(Imm64(plan.node()->asAnyInt()), plan.gpr());
        break;
#endif // USE(JSVALUE64)
    case SetBooleanConstant:
        m_jit.move(TrustedImm32(plan.node()->asBoolean()), plan.gpr());
        break;
    case SetCellConstant:
        ASSERT(plan.node()->constant()->value().isCell());
        m_jit.move(TrustedImmPtr(plan.node()->constant()), plan.gpr());
        break;
#if USE(JSVALUE64)
    case SetTrustedJSConstant:
        m_jit.move(valueOfJSConstantAsImm64(plan.node()).asTrustedImm64(), plan.gpr());
        break;
    case SetJSConstant:
        m_jit.move(valueOfJSConstantAsImm64(plan.node()), plan.gpr());
        break;
    case SetDoubleConstant:
        m_jit.moveDouble(Imm64(reinterpretDoubleToInt64(plan.node()->asNumber())), plan.fpr());
        break;
    case Load32PayloadBoxInt:
        // The slot holds a raw int32; box it by OR-ing in the number tag.
        m_jit.load32(JITCompiler::payloadFor(plan.node()->virtualRegister()), plan.gpr());
        m_jit.or64(GPRInfo::numberTagRegister, plan.gpr());
        break;
    case Load32PayloadConvertToInt52:
        m_jit.load32(JITCompiler::payloadFor(plan.node()->virtualRegister()), plan.gpr());
        m_jit.signExtend32ToPtr(plan.gpr(), plan.gpr());
        m_jit.lshift64(TrustedImm32(JSValue::int52ShiftAmount), plan.gpr());
        break;
    case Load32PayloadSignExtend:
        m_jit.load32(JITCompiler::payloadFor(plan.node()->virtualRegister()), plan.gpr());
        m_jit.signExtend32ToPtr(plan.gpr(), plan.gpr());
        break;
#else
    case SetJSConstantTag:
        m_jit.move(Imm32(plan.node()->asJSValue().tag()), plan.gpr());
        break;
    case SetJSConstantPayload:
        m_jit.move(Imm32(plan.node()->asJSValue().payload()), plan.gpr());
        break;
    case SetInt32Tag:
        m_jit.move(TrustedImm32(JSValue::Int32Tag), plan.gpr());
        break;
    case SetCellTag:
        m_jit.move(TrustedImm32(JSValue::CellTag), plan.gpr());
        break;
    case SetBooleanTag:
        m_jit.move(TrustedImm32(JSValue::BooleanTag), plan.gpr());
        break;
    case SetDoubleConstant:
        m_jit.loadDouble(TrustedImmPtr(m_jit.addressOfDoubleConstant(plan.node())), plan.fpr());
        break;
#endif
    case Load32Tag:
        m_jit.load32(JITCompiler::tagFor(plan.node()->virtualRegister()), plan.gpr());
        break;
    case Load32Payload:
        m_jit.load32(JITCompiler::payloadFor(plan.node()->virtualRegister()), plan.gpr());
        break;
    case LoadPtr:
        m_jit.loadPtr(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.gpr());
        break;
#if USE(JSVALUE64)
    case Load64:
        m_jit.load64(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.gpr());
        break;
    case Load64ShiftInt52Right:
        // Spilled as left-shifted Int52; shift right to recover strict form.
        m_jit.load64(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.gpr());
        m_jit.rshift64(TrustedImm32(JSValue::int52ShiftAmount), plan.gpr());
        break;
    case Load64ShiftInt52Left:
        // Spilled as strict Int52; shift left to recover register form.
        m_jit.load64(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.gpr());
        m_jit.lshift64(TrustedImm32(JSValue::int52ShiftAmount), plan.gpr());
        break;
#endif
    case LoadDouble:
        m_jit.loadDouble(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.fpr());
        break;
    default:
        RELEASE_ASSERT_NOT_REACHED();
    }
}
763
// Emits checks that tempGPR (holding a cell's indexingTypeAndMisc byte) matches
// the given ArrayMode, returning the jumps taken when it does not. tempGPR is
// clobbered. For write modes the CopyOnWrite bit is also checked, so CoW
// arrays fall off the fast path.
JITCompiler::JumpList SpeculativeJIT::jumpSlowForUnwantedArrayMode(GPRReg tempGPR, ArrayMode arrayMode)
{
    JITCompiler::JumpList result;

    IndexingType indexingModeMask = IsArray | IndexingShapeMask;
    if (arrayMode.action() == Array::Write)
        indexingModeMask |= CopyOnWrite;

    switch (arrayMode.type()) {
    case Array::Int32:
    case Array::Double:
    case Array::Contiguous:
    case Array::Undecided:
    case Array::ArrayStorage: {
        // These modes demand an exact indexing shape; what else must match
        // depends on whether we know the cell is/isn't an array.
        IndexingType shape = arrayMode.shapeMask();
        switch (arrayMode.arrayClass()) {
        case Array::OriginalArray:
        case Array::OriginalCopyOnWriteArray:
            // Original-array modes are checked structurally elsewhere.
            RELEASE_ASSERT_NOT_REACHED();
            return result;

        case Array::Array:
            m_jit.and32(TrustedImm32(indexingModeMask), tempGPR);
            result.append(m_jit.branch32(
                MacroAssembler::NotEqual, tempGPR, TrustedImm32(IsArray | shape)));
            return result;

        case Array::NonArray:
        case Array::OriginalNonArray:
            m_jit.and32(TrustedImm32(indexingModeMask), tempGPR);
            result.append(m_jit.branch32(
                MacroAssembler::NotEqual, tempGPR, TrustedImm32(shape)));
            return result;

        case Array::PossiblyArray:
            // Don't care about the IsArray bit, only the shape (and CoW bit
            // for writes).
            m_jit.and32(TrustedImm32(indexingModeMask & ~IsArray), tempGPR);
            result.append(m_jit.branch32(MacroAssembler::NotEqual, tempGPR, TrustedImm32(shape)));
            return result;
        }

        RELEASE_ASSERT_NOT_REACHED();
        return result;
    }

    case Array::SlowPutArrayStorage: {
        ASSERT(!arrayMode.isJSArrayWithOriginalStructure());

        // First enforce the IsArray requirement (if any)...
        switch (arrayMode.arrayClass()) {
        case Array::OriginalArray:
        case Array::OriginalCopyOnWriteArray:
            RELEASE_ASSERT_NOT_REACHED();
            return result;

        case Array::Array:
            result.append(
                m_jit.branchTest32(
                    MacroAssembler::Zero, tempGPR, MacroAssembler::TrustedImm32(IsArray)));
            break;

        case Array::NonArray:
        case Array::OriginalNonArray:
            result.append(
                m_jit.branchTest32(
                    MacroAssembler::NonZero, tempGPR, MacroAssembler::TrustedImm32(IsArray)));
            break;

        case Array::PossiblyArray:
            break;
        }

        // ...then accept either ArrayStorage or SlowPutArrayStorage shapes via
        // an unsigned range check.
        m_jit.and32(TrustedImm32(IndexingShapeMask), tempGPR);
        m_jit.sub32(TrustedImm32(ArrayStorageShape), tempGPR);
        result.append(
            m_jit.branch32(
                MacroAssembler::Above, tempGPR,
                TrustedImm32(SlowPutArrayStorageShape - ArrayStorageShape)));
        return result;
    }
    default:
        CRASH();
        break;
    }

    return result;
}
849
// Compiles CheckArray / CheckArrayOrEmpty: verifies that the base cell's
// storage matches the node's already-refined ArrayMode, OSR-exiting on
// mismatch. Does no conversion (that is arrayify's job) and produces no
// result.
void SpeculativeJIT::checkArray(Node* node)
{
    ArrayMode arrayMode = node->arrayMode();
    ASSERT(arrayMode.isSpecific());
    ASSERT(!arrayMode.doesConversion());

    SpeculateCellOperand base(this, node->child1());
    GPRReg baseReg = base.gpr();

    if (arrayMode.alreadyChecked(m_jit.graph(), node, m_state.forNode(node->child1()))) {
        // We can purge Empty check completely in this case of CheckArrayOrEmpty since CellUse only accepts SpecCell | SpecEmpty.
        ASSERT(typeFilterFor(node->child1().useKind()) & SpecEmpty);
        noResult(m_currentNode);
        return;
    }

    // Only the indexing-shape-based modes need a scratch register to hold the
    // indexing-type byte.
    std::optional<GPRTemporary> temp;
    std::optional<GPRReg> tempGPR;
    switch (arrayMode.type()) {
    case Array::Int32:
    case Array::Double:
    case Array::Contiguous:
    case Array::Undecided:
    case Array::ArrayStorage:
    case Array::SlowPutArrayStorage: {
        temp.emplace(this);
        tempGPR = temp->gpr();
        break;
    }
    default:
        break;
    }

    CCallHelpers::Jump isEmpty;

#if USE(JSVALUE64)
    // CheckArrayOrEmpty lets the empty value through: skip the array check
    // entirely when the operand might be empty.
    if (node->op() == CheckArrayOrEmpty) {
        if (m_interpreter.forNode(node->child1()).m_type & SpecEmpty)
            isEmpty = m_jit.branchIfEmpty(baseReg);
    }
#endif

    switch (arrayMode.type()) {
    case Array::String:
        RELEASE_ASSERT_NOT_REACHED(); // Should have been a Phantom(String:)
        return;
    case Array::Int32:
    case Array::Double:
    case Array::Contiguous:
    case Array::Undecided:
    case Array::ArrayStorage:
    case Array::SlowPutArrayStorage: {
        // Load the indexing-type byte and check it against the mode.
        m_jit.load8(MacroAssembler::Address(baseReg, JSCell::indexingTypeAndMiscOffset()), tempGPR.value());
        speculationCheck(
            BadIndexingType, JSValueSource::unboxedCell(baseReg), nullptr,
            jumpSlowForUnwantedArrayMode(tempGPR.value(), arrayMode));
        break;
    }
    case Array::DirectArguments:
        speculateCellTypeWithoutTypeFiltering(node->child1(), baseReg, DirectArgumentsType);
        break;
    case Array::ScopedArguments:
        speculateCellTypeWithoutTypeFiltering(node->child1(), baseReg, ScopedArgumentsType);
        break;
    default: {
        DFG_ASSERT(m_graph, node, arrayMode.isSomeTypedArrayView());

        // AnyTypedArray accepts the whole typed-array JSType range (excluding
        // DataView); specific views check a single JSType.
        if (arrayMode.type() == Array::AnyTypedArray)
            speculationCheck(BadType, JSValueSource::unboxedCell(baseReg), nullptr, m_jit.branchIfNotType(baseReg, JSTypeRange { JSType(FirstTypedArrayType), JSType(LastTypedArrayTypeExcludingDataView) }));
        else
            speculateCellTypeWithoutTypeFiltering(node->child1(), baseReg, typeForTypedArrayType(arrayMode.typedArrayType()));
        break;
    }
    }

    if (isEmpty.isSet())
        isEmpty.link(&m_jit);
    noResult(m_currentNode);
}
929
// Emits code that converts the base object's indexing type to the one the
// node's ArrayMode wants, jumping to an ArrayifySlowPathGenerator when the
// current structure/indexing type doesn't already match.
// propertyReg may be InvalidGPRReg when the node has no index child.
void SpeculativeJIT::arrayify(Node* node, GPRReg baseReg, GPRReg propertyReg)
{
    ASSERT(node->arrayMode().doesConversion());

    GPRTemporary temp(this);
    GPRTemporary structure;
    GPRReg tempGPR = temp.gpr();
    GPRReg structureGPR = InvalidGPRReg;

    // ArrayifyToStructure checks the structure directly and needs no extra
    // scratch; the generic case needs a register for the structure.
    if (node->op() != ArrayifyToStructure) {
        GPRTemporary realStructure(this);
        structure.adopt(realStructure);
        structureGPR = structure.gpr();
    }

    // We can skip all that comes next if we already have array storage.
    MacroAssembler::JumpList slowPath;

    if (node->op() == ArrayifyToStructure) {
        ASSERT(!isCopyOnWrite(node->structure()->indexingMode()));
        ASSERT((node->structure()->indexingType() & IndexingShapeMask) == node->arrayMode().shapeMask());
        // Weak structure compare: fast path falls through when the object is
        // already of the target structure.
        slowPath.append(m_jit.branchWeakStructure(
            JITCompiler::NotEqual,
            JITCompiler::Address(baseReg, JSCell::structureIDOffset()),
            node->structure()));
    } else {
        // Generic case: test the indexing-type byte against the wanted mode.
        m_jit.load8(
            MacroAssembler::Address(baseReg, JSCell::indexingTypeAndMiscOffset()), tempGPR);

        slowPath.append(jumpSlowForUnwantedArrayMode(tempGPR, node->arrayMode()));
    }

    addSlowPathGenerator(makeUnique<ArrayifySlowPathGenerator>(
        slowPath, this, node, baseReg, propertyReg, tempGPR, structureGPR));

    noResult(m_currentNode);
}
967
968void SpeculativeJIT::arrayify(Node* node)
969{
970 ASSERT(node->arrayMode().isSpecific());
971
972 SpeculateCellOperand base(this, node->child1());
973
974 if (!node->child2()) {
975 arrayify(node, base.gpr(), InvalidGPRReg);
976 return;
977 }
978
979 SpeculateInt32Operand property(this, node->child2());
980
981 arrayify(node, base.gpr(), property.gpr());
982}
983
984GPRReg SpeculativeJIT::fillStorage(Edge edge)
985{
986 VirtualRegister virtualRegister = edge->virtualRegister();
987 GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
988
989 switch (info.registerFormat()) {
990 case DataFormatNone: {
991 if (info.spillFormat() == DataFormatStorage) {
992 GPRReg gpr = allocate();
993 m_gprs.retain(gpr, virtualRegister, SpillOrderSpilled);
994 m_jit.loadPtr(JITCompiler::addressFor(virtualRegister), gpr);
995 info.fillStorage(*m_stream, gpr);
996 return gpr;
997 }
998
999 // Must be a cell; fill it as a cell and then return the pointer.
1000 return fillSpeculateCell(edge);
1001 }
1002
1003 case DataFormatStorage: {
1004 GPRReg gpr = info.gpr();
1005 m_gprs.lock(gpr);
1006 return gpr;
1007 }
1008
1009 default:
1010 return fillSpeculateCell(edge);
1011 }
1012}
1013
1014void SpeculativeJIT::useChildren(Node* node)
1015{
1016 if (node->flags() & NodeHasVarArgs) {
1017 for (unsigned childIdx = node->firstChild(); childIdx < node->firstChild() + node->numChildren(); childIdx++) {
1018 if (!!m_jit.graph().m_varArgChildren[childIdx])
1019 use(m_jit.graph().m_varArgChildren[childIdx]);
1020 }
1021 } else {
1022 Edge child1 = node->child1();
1023 if (!child1) {
1024 ASSERT(!node->child2() && !node->child3());
1025 return;
1026 }
1027 use(child1);
1028
1029 Edge child2 = node->child2();
1030 if (!child2) {
1031 ASSERT(!node->child3());
1032 return;
1033 }
1034 use(child2);
1035
1036 Edge child3 = node->child3();
1037 if (!child3)
1038 return;
1039 use(child3);
1040 }
1041}
1042
// Emits a get-by-id inline cache. Two paths depending on the proven type of
// the base: CellUse needs no cell check; UntypedUse branches the not-cell
// case into the IC's slow path via the `notCell` jump.
void SpeculativeJIT::compileGetById(Node* node, AccessType accessType)
{
    ASSERT(accessType == AccessType::GetById || accessType == AccessType::GetByIdDirect || accessType == AccessType::TryGetById);

    switch (node->child1().useKind()) {
    case CellUse: {
        std::optional<GPRTemporary> stubInfo;
        SpeculateCellOperand base(this, node->child1());
        JSValueRegsTemporary result(this, Reuse, base);

        // Data ICs need a register pointing at the StructureStubInfo.
        GPRReg stubInfoGPR = InvalidGPRReg;
        if (JITCode::useDataIC(JITType::DFGJIT)) {
            stubInfo.emplace(this);
            stubInfoGPR = stubInfo->gpr();
        }
        JSValueRegs baseRegs = JSValueRegs::payloadOnly(base.gpr());
        JSValueRegs resultRegs = result.regs();

        // Children are marked used up front; the result call below passes
        // UseChildrenCalledExplicitly accordingly.
        base.use();

        cachedGetById(node->origin.semantic, baseRegs, resultRegs, stubInfoGPR, node->cacheableIdentifier(), JITCompiler::Jump(), NeedToSpill, accessType);

        jsValueResult(resultRegs, node, DataFormatJS, UseChildrenCalledExplicitly);
        break;
    }

    case UntypedUse: {
        std::optional<GPRTemporary> stubInfo;
        JSValueOperand base(this, node->child1());
        JSValueRegsTemporary result(this, Reuse, base);

        GPRReg stubInfoGPR = InvalidGPRReg;
        if (JITCode::useDataIC(JITType::DFGJIT)) {
            stubInfo.emplace(this);
            stubInfoGPR = stubInfo->gpr();
        }
        JSValueRegs baseRegs = base.jsValueRegs();
        JSValueRegs resultRegs = result.regs();

        base.use();

        // Non-cell bases bypass the IC fast path entirely.
        JITCompiler::Jump notCell = m_jit.branchIfNotCell(baseRegs);

        cachedGetById(node->origin.semantic, baseRegs, resultRegs, stubInfoGPR, node->cacheableIdentifier(), notCell, NeedToSpill, accessType);

        jsValueResult(resultRegs, node, DataFormatJS, UseChildrenCalledExplicitly);
        break;
    }

    default:
        DFG_CRASH(m_jit.graph(), node, "Bad use kind");
        break;
    }
}
1097
// Like compileGetById, but flushes all registers before emitting the IC and
// places the result in the flushed call-result registers (DontSpill), for
// nodes that must treat the access as a full call site.
void SpeculativeJIT::compileGetByIdFlush(Node* node, AccessType accessType)
{
    switch (node->child1().useKind()) {
    case CellUse: {
        std::optional<GPRTemporary> stubInfo;
        SpeculateCellOperand base(this, node->child1());
        JSValueRegsFlushedCallResult result(this);

        // Data ICs need a register pointing at the StructureStubInfo.
        GPRReg stubInfoGPR = InvalidGPRReg;
        if (JITCode::useDataIC(JITType::DFGJIT)) {
            stubInfo.emplace(this);
            stubInfoGPR = stubInfo->gpr();
        }
        JSValueRegs baseRegs = JSValueRegs::payloadOnly(base.gpr());
        JSValueRegs resultRegs = result.regs();

        base.use();

        // Everything is spilled before the IC, so the slow path needs no
        // further spilling (DontSpill below).
        flushRegisters();

        cachedGetById(node->origin.semantic, baseRegs, resultRegs, stubInfoGPR, node->cacheableIdentifier(), JITCompiler::Jump(), DontSpill, accessType);

        jsValueResult(resultRegs, node, DataFormatJS, UseChildrenCalledExplicitly);
        break;
    }

    case UntypedUse: {
        std::optional<GPRTemporary> stubInfo;
        JSValueOperand base(this, node->child1());
        JSValueRegsFlushedCallResult result(this);

        GPRReg stubInfoGPR = InvalidGPRReg;
        if (JITCode::useDataIC(JITType::DFGJIT)) {
            stubInfo.emplace(this);
            stubInfoGPR = stubInfo->gpr();
        }
        JSValueRegs baseRegs = base.jsValueRegs();
        JSValueRegs resultRegs = result.regs();

        base.use();

        flushRegisters();

        // Non-cell bases bypass the IC fast path entirely.
        JITCompiler::Jump notCell = m_jit.branchIfNotCell(baseRegs);

        cachedGetById(node->origin.semantic, baseRegs, resultRegs, stubInfoGPR, node->cacheableIdentifier(), notCell, DontSpill, accessType);

        jsValueResult(resultRegs, node, DataFormatJS, UseChildrenCalledExplicitly);
        break;
    }

    default:
        DFG_CRASH(m_jit.graph(), node, "Bad use kind");
        break;
    }
}
1154
// `delete base.id`. For a cell base, emits a DelById inline cache whose slow
// path calls operationDeleteByIdOptimize; any other base falls back to a
// plain call to operationDeleteByIdGeneric. Result is an unblessed boolean.
void SpeculativeJIT::compileDeleteById(Node* node)
{
    if (node->child1().useKind() == CellUse) {
        std::optional<GPRTemporary> stubInfo;
        SpeculateCellOperand base(this, node->child1());
        JSValueRegsTemporary result(this);
        GPRTemporary scratch(this);

        JITCompiler::JumpList slowCases;

        // Data ICs need a register pointing at the StructureStubInfo.
        GPRReg stubInfoGPR = InvalidGPRReg;
        if (JITCode::useDataIC(JITType::DFGJIT)) {
            stubInfo.emplace(this);
            stubInfoGPR = stubInfo->gpr();
        }
        JSValueRegs resultRegs = result.regs();
        GPRReg baseGPR = base.gpr();
        GPRReg scratchGPR = scratch.gpr();
        GPRReg resultGPR = resultRegs.payloadGPR();

        CodeOrigin codeOrigin = node->origin.semantic;
        CallSiteIndex callSite = m_jit.recordCallSiteAndGenerateExceptionHandlingOSRExitIfNeeded(codeOrigin, m_stream->size());
        RegisterSet usedRegisters = this->usedRegisters();

        JITDelByIdGenerator gen(
            m_jit.codeBlock(), JITType::DFGJIT, codeOrigin, callSite, usedRegisters, node->cacheableIdentifier(),
            JSValueRegs::payloadOnly(baseGPR), resultRegs, stubInfoGPR, scratchGPR);

        gen.generateFastPath(m_jit);
        // Only patchable (non-data) ICs expose an explicit slow-path jump.
        if (!JITCode::useDataIC(JITType::DFGJIT))
            slowCases.append(gen.slowPathJump());

#if USE(JSVALUE64)
        // 64-bit passes the base as JSValueRegs; 32-bit (below) wraps it as a
        // CellValue to form a full JSValue for the C call.
        std::unique_ptr<SlowPathGenerator> slowPath;
        if (JITCode::useDataIC(JITType::DFGJIT)) {
            slowPath = slowPathICCall(
                slowCases, this, gen.stubInfo(), stubInfoGPR, CCallHelpers::Address(stubInfoGPR, StructureStubInfo::offsetOfSlowOperation()), operationDeleteByIdOptimize,
                resultGPR, TrustedImmPtr::weakPointer(m_graph, m_graph.globalObjectFor(codeOrigin)), stubInfoGPR, JSValueRegs(baseGPR), node->cacheableIdentifier().rawBits(), TrustedImm32(node->ecmaMode().value()));
        } else {
            slowPath = slowPathCall(
                slowCases, this, operationDeleteByIdOptimize,
                resultGPR, TrustedImmPtr::weakPointer(m_graph, m_graph.globalObjectFor(codeOrigin)), gen.stubInfo(), JSValueRegs(baseGPR), node->cacheableIdentifier().rawBits(), TrustedImm32(node->ecmaMode().value()));
        }
#else
        std::unique_ptr<SlowPathGenerator> slowPath;
        if (JITCode::useDataIC(JITType::DFGJIT)) {
            slowPath = slowPathICCall(
                slowCases, this, gen.stubInfo(), stubInfoGPR, CCallHelpers::Address(stubInfoGPR, StructureStubInfo::offsetOfSlowOperation()), operationDeleteByIdOptimize,
                resultGPR, TrustedImmPtr::weakPointer(m_graph, m_graph.globalObjectFor(codeOrigin)), stubInfoGPR, CCallHelpers::CellValue(baseGPR), node->cacheableIdentifier().rawBits(), TrustedImm32(node->ecmaMode().value()));
        } else {
            slowPath = slowPathCall(
                slowCases, this, operationDeleteByIdOptimize,
                resultGPR, TrustedImmPtr::weakPointer(m_graph, m_graph.globalObjectFor(codeOrigin)), gen.stubInfo(), CCallHelpers::CellValue(baseGPR), node->cacheableIdentifier().rawBits(), TrustedImm32(node->ecmaMode().value()));
        }
#endif

        m_jit.addDelById(gen, slowPath.get());
        addSlowPathGenerator(WTFMove(slowPath));

        unblessedBooleanResult(resultGPR, node);
        return;
    }

    // FIXME: We should use IC even if child1 is UntypedUse. In that case, we should emit write-barrier after the fast path of IC.
    // https://bugs.webkit.org/show_bug.cgi?id=209397
    ASSERT(node->child1().useKind() == UntypedUse);
    JSValueOperand base(this, node->child1());

    JSValueRegs baseRegs = base.jsValueRegs();

    flushRegisters();
    GPRFlushedCallResult result(this);
    GPRReg resultGPR = result.gpr();
    callOperation(operationDeleteByIdGeneric, resultGPR, TrustedImmPtr::weakPointer(m_graph, m_graph.globalObjectFor(node->origin.semantic)), nullptr, baseRegs, node->cacheableIdentifier().rawBits(), TrustedImm32(node->ecmaMode().value()));
    m_jit.exceptionCheck();

    unblessedBooleanResult(resultGPR, node);
}
1233
// `delete base[key]`. For a cell base, emits a DelByVal inline cache whose
// slow path calls operationDeleteByValOptimize; any other base falls back to
// operationDeleteByValGeneric. Result is an unblessed boolean.
void SpeculativeJIT::compileDeleteByVal(Node* node)
{
    if (node->child1().useKind() == CellUse) {
        std::optional<GPRTemporary> stubInfo;
        SpeculateCellOperand base(this, node->child1());
        JSValueOperand key(this, node->child2(), ManualOperandSpeculation);
        JSValueRegsTemporary result(this, Reuse, key);
        GPRTemporary scratch(this);

        JITCompiler::JumpList slowCases;

        // Data ICs need a register pointing at the StructureStubInfo.
        GPRReg stubInfoGPR = InvalidGPRReg;
        if (JITCode::useDataIC(JITType::DFGJIT)) {
            stubInfo.emplace(this);
            stubInfoGPR = stubInfo->gpr();
        }
        GPRReg baseGPR = base.gpr();
        JSValueRegs keyRegs = key.jsValueRegs();
        JSValueRegs resultRegs = result.regs();
        GPRReg scratchGPR = scratch.gpr();
        GPRReg resultGPR = resultRegs.payloadGPR();

        // Key was taken with ManualOperandSpeculation, so speculate here.
        speculate(node, node->child2());

        // Non-cell keys go straight to the slow path.
        if (needsTypeCheck(node->child2(), SpecCell))
            slowCases.append(m_jit.branchIfNotCell(keyRegs));

        CodeOrigin codeOrigin = node->origin.semantic;
        CallSiteIndex callSite = m_jit.recordCallSiteAndGenerateExceptionHandlingOSRExitIfNeeded(codeOrigin, m_stream->size());
        RegisterSet usedRegisters = this->usedRegisters();

        JITDelByValGenerator gen(
            m_jit.codeBlock(), JITType::DFGJIT, codeOrigin, callSite, usedRegisters,
            JSValueRegs::payloadOnly(baseGPR), keyRegs, resultRegs, stubInfoGPR, scratchGPR);

        gen.generateFastPath(m_jit);
        // Only patchable (non-data) ICs expose an explicit slow-path jump.
        if (!JITCode::useDataIC(JITType::DFGJIT))
            slowCases.append(gen.slowPathJump());

#if USE(JSVALUE64)
        // 64-bit passes the base as JSValueRegs; 32-bit (below) wraps it as a
        // CellValue to form a full JSValue for the C call.
        std::unique_ptr<SlowPathGenerator> slowPath;
        if (JITCode::useDataIC(JITType::DFGJIT)) {
            slowPath = slowPathICCall(
                slowCases, this, gen.stubInfo(), stubInfoGPR, CCallHelpers::Address(stubInfoGPR, StructureStubInfo::offsetOfSlowOperation()), operationDeleteByValOptimize,
                resultGPR, TrustedImmPtr::weakPointer(m_graph, m_graph.globalObjectFor(codeOrigin)), stubInfoGPR, JSValueRegs(baseGPR), keyRegs, TrustedImm32(node->ecmaMode().value()));
        } else {
            slowPath = slowPathCall(
                slowCases, this, operationDeleteByValOptimize,
                resultGPR, TrustedImmPtr::weakPointer(m_graph, m_graph.globalObjectFor(codeOrigin)), gen.stubInfo(), JSValueRegs(baseGPR), keyRegs, TrustedImm32(node->ecmaMode().value()));
        }
#else
        std::unique_ptr<SlowPathGenerator> slowPath;
        if (JITCode::useDataIC(JITType::DFGJIT)) {
            slowPath = slowPathICCall(
                slowCases, this, gen.stubInfo(), stubInfoGPR, CCallHelpers::Address(stubInfoGPR, StructureStubInfo::offsetOfSlowOperation()), operationDeleteByValOptimize,
                resultGPR, TrustedImmPtr::weakPointer(m_graph, m_graph.globalObjectFor(codeOrigin)), stubInfoGPR, CCallHelpers::CellValue(baseGPR), keyRegs, TrustedImm32(node->ecmaMode().value()));
        } else {
            slowPath = slowPathCall(
                slowCases, this, operationDeleteByValOptimize,
                resultGPR, TrustedImmPtr::weakPointer(m_graph, m_graph.globalObjectFor(codeOrigin)), gen.stubInfo(), CCallHelpers::CellValue(baseGPR), keyRegs, TrustedImm32(node->ecmaMode().value()));
        }
#endif

        m_jit.addDelByVal(gen, slowPath.get());
        addSlowPathGenerator(WTFMove(slowPath));

        unblessedBooleanResult(resultGPR, node);
        return;
    }

    // FIXME: We should use IC even if child1 is UntypedUse. In that case, we should emit write-barrier after the fast path of IC.
    // https://bugs.webkit.org/show_bug.cgi?id=209397
    JSValueOperand base(this, node->child1());
    JSValueOperand key(this, node->child2());

    JSValueRegs baseRegs = base.jsValueRegs();
    JSValueRegs keyRegs = key.jsValueRegs();

    flushRegisters();
    GPRFlushedCallResult result(this);
    GPRReg resultGPR = result.gpr();
    callOperation(operationDeleteByValGeneric, resultGPR, TrustedImmPtr::weakPointer(m_graph, m_graph.globalObjectFor(node->origin.semantic)), nullptr, baseRegs, keyRegs, TrustedImm32(node->ecmaMode().value()));
    m_jit.exceptionCheck();

    unblessedBooleanResult(resultGPR, node);
}
1320
// `id in base` for a cell base: emits an InById inline cache backed by
// operationInByIdOptimize. Result is a blessed boolean.
void SpeculativeJIT::compileInById(Node* node)
{
    std::optional<GPRTemporary> stubInfo;
    SpeculateCellOperand base(this, node->child1());
    JSValueRegsTemporary result(this, Reuse, base, PayloadWord);

    // Data ICs need a register pointing at the StructureStubInfo.
    GPRReg stubInfoGPR = InvalidGPRReg;
    if (JITCode::useDataIC(JITType::DFGJIT)) {
        stubInfo.emplace(this);
        stubInfoGPR = stubInfo->gpr();
    }
    GPRReg baseGPR = base.gpr();
    JSValueRegs resultRegs = result.regs();

    base.use();

    CodeOrigin codeOrigin = node->origin.semantic;
    CallSiteIndex callSite = m_jit.recordCallSiteAndGenerateExceptionHandlingOSRExitIfNeeded(codeOrigin, m_stream->size());
    RegisterSet usedRegisters = this->usedRegisters();
    JITInByIdGenerator gen(
        m_jit.codeBlock(), JITType::DFGJIT, codeOrigin, callSite, usedRegisters, node->cacheableIdentifier(),
        JSValueRegs::payloadOnly(baseGPR), resultRegs, stubInfoGPR);
    gen.generateFastPath(m_jit);

    JITCompiler::JumpList slowCases;
    // NOTE(review): unlike compileInByVal below, the slow-path jump is
    // appended unconditionally here rather than only when !useDataIC —
    // confirm slowPathJump() is valid in the data-IC configuration.
    slowCases.append(gen.slowPathJump());

    std::unique_ptr<SlowPathGenerator> slowPath;
    if (JITCode::useDataIC(JITType::DFGJIT)) {
        slowPath = slowPathICCall(
            slowCases, this, gen.stubInfo(), stubInfoGPR, CCallHelpers::Address(stubInfoGPR, StructureStubInfo::offsetOfSlowOperation()), operationInByIdOptimize,
            NeedToSpill, ExceptionCheckRequirement::CheckNeeded,
            resultRegs, TrustedImmPtr::weakPointer(m_graph, m_graph.globalObjectFor(codeOrigin)), stubInfoGPR, CCallHelpers::CellValue(baseGPR), node->cacheableIdentifier().rawBits());
    } else {
        slowPath = slowPathCall(
            slowCases, this, operationInByIdOptimize,
            NeedToSpill, ExceptionCheckRequirement::CheckNeeded,
            resultRegs, TrustedImmPtr::weakPointer(m_graph, m_graph.globalObjectFor(codeOrigin)), gen.stubInfo(), CCallHelpers::CellValue(baseGPR), node->cacheableIdentifier().rawBits());
    }

    m_jit.addInById(gen, slowPath.get());
    addSlowPathGenerator(WTFMove(slowPath));

    blessedBooleanResult(resultRegs.payloadGPR(), node, UseChildrenCalledExplicitly);
}
1366
// `key in base` for a cell base: emits an InByVal inline cache backed by
// operationInByValOptimize. Result is a blessed boolean.
void SpeculativeJIT::compileInByVal(Node* node)
{
    SpeculateCellOperand base(this, node->child1());
    JSValueOperand key(this, node->child2());
    JSValueRegsTemporary result(this, Reuse, key);
    std::optional<GPRTemporary> stubInfo;

    // Data ICs need a register pointing at the StructureStubInfo.
    GPRReg stubInfoGPR = InvalidGPRReg;
    if (JITCode::useDataIC(JITType::DFGJIT)) {
        stubInfo.emplace(this);
        stubInfoGPR = stubInfo->gpr();
    }
    GPRReg baseGPR = base.gpr();
    JSValueRegs keyRegs = key.jsValueRegs();
    JSValueRegs resultRegs = result.regs();

    // Children marked used up front; results pass UseChildrenCalledExplicitly.
    base.use();
    key.use();

    CCallHelpers::JumpList slowCases;

    CodeOrigin codeOrigin = node->origin.semantic;
    CallSiteIndex callSite = m_jit.recordCallSiteAndGenerateExceptionHandlingOSRExitIfNeeded(codeOrigin, m_stream->size());
    RegisterSet usedRegisters = this->usedRegisters();
    JITInByValGenerator gen(
        m_jit.codeBlock(), JITType::DFGJIT, codeOrigin, callSite, AccessType::InByVal, usedRegisters,
        JSValueRegs::payloadOnly(baseGPR), keyRegs, resultRegs, stubInfoGPR);
    gen.generateFastPath(m_jit);
    // Only patchable (non-data) ICs expose an explicit slow-path jump.
    if (!JITCode::useDataIC(JITType::DFGJIT))
        slowCases.append(gen.slowPathJump());

    std::unique_ptr<SlowPathGenerator> slowPath;
    if (JITCode::useDataIC(JITType::DFGJIT)) {
        slowPath = slowPathICCall(
            slowCases, this, gen.stubInfo(), stubInfoGPR, CCallHelpers::Address(stubInfoGPR, StructureStubInfo::offsetOfSlowOperation()), operationInByValOptimize,
            NeedToSpill, ExceptionCheckRequirement::CheckNeeded,
            resultRegs, TrustedImmPtr::weakPointer(m_graph, m_graph.globalObjectFor(codeOrigin)), stubInfoGPR, nullptr, CCallHelpers::CellValue(baseGPR), keyRegs);
    } else {
        slowPath = slowPathCall(
            slowCases, this, operationInByValOptimize,
            NeedToSpill, ExceptionCheckRequirement::CheckNeeded,
            resultRegs, TrustedImmPtr::weakPointer(m_graph, m_graph.globalObjectFor(codeOrigin)), gen.stubInfo(), nullptr, CCallHelpers::CellValue(baseGPR), keyRegs);
    }

    m_jit.addInByVal(gen, slowPath.get());
    addSlowPathGenerator(WTFMove(slowPath));

    blessedBooleanResult(resultRegs.payloadGPR(), node, UseChildrenCalledExplicitly);
}
1416
// Shared implementation for HasPrivateName / HasPrivateBrand: both are
// modeled as InByVal-style ICs where the property is a symbol. `type`
// selects which slow-path operation is called.
void SpeculativeJIT::compileHasPrivate(Node* node, AccessType type)
{
    SpeculateCellOperand base(this, node->child1());
    SpeculateCellOperand propertyOrBrand(this, node->child2());
    JSValueRegsTemporary result(this, Reuse, base);
    std::optional<GPRTemporary> stubInfo;

    // Data ICs need a register pointing at the StructureStubInfo.
    GPRReg stubInfoGPR = InvalidGPRReg;
    if (JITCode::useDataIC(JITType::DFGJIT)) {
        stubInfo.emplace(this);
        stubInfoGPR = stubInfo->gpr();
    }
    GPRReg baseGPR = base.gpr();
    GPRReg propertyOrBrandGPR = propertyOrBrand.gpr();
    JSValueRegs resultRegs = result.regs();

    // Private names/brands are always symbols.
    speculateSymbol(node->child2(), propertyOrBrandGPR);

    base.use();
    propertyOrBrand.use();

    CCallHelpers::JumpList slowCases;

    CodeOrigin codeOrigin = node->origin.semantic;
    CallSiteIndex callSite = m_jit.recordCallSiteAndGenerateExceptionHandlingOSRExitIfNeeded(codeOrigin, m_stream->size());
    RegisterSet usedRegisters = this->usedRegisters();
    JITInByValGenerator gen(
        m_jit.codeBlock(), JITType::DFGJIT, codeOrigin, callSite, type, usedRegisters,
        JSValueRegs::payloadOnly(baseGPR), JSValueRegs::payloadOnly(propertyOrBrandGPR), resultRegs, stubInfoGPR);

    // Record on the stub info that the property operand is a symbol.
    gen.stubInfo()->propertyIsSymbol = true;
    gen.generateFastPath(m_jit);
    // Only patchable (non-data) ICs expose an explicit slow-path jump.
    if (!JITCode::useDataIC(JITType::DFGJIT))
        slowCases.append(gen.slowPathJump());

    std::unique_ptr<SlowPathGenerator> slowPath;
    if (JITCode::useDataIC(JITType::DFGJIT)) {
        slowPath = slowPathICCall(
            slowCases, this, gen.stubInfo(), stubInfoGPR, CCallHelpers::Address(stubInfoGPR, StructureStubInfo::offsetOfSlowOperation()), type == AccessType::HasPrivateName ? operationHasPrivateNameOptimize : operationHasPrivateBrandOptimize,
            NeedToSpill, ExceptionCheckRequirement::CheckNeeded,
            resultRegs, TrustedImmPtr::weakPointer(m_graph, m_graph.globalObjectFor(codeOrigin)), stubInfoGPR, CCallHelpers::CellValue(baseGPR), CCallHelpers::CellValue(propertyOrBrandGPR));
    } else {
        slowPath = slowPathCall(
            slowCases, this, type == AccessType::HasPrivateName ? operationHasPrivateNameOptimize : operationHasPrivateBrandOptimize,
            NeedToSpill, ExceptionCheckRequirement::CheckNeeded,
            resultRegs, TrustedImmPtr::weakPointer(m_graph, m_graph.globalObjectFor(codeOrigin)), gen.stubInfo(), CCallHelpers::CellValue(baseGPR), CCallHelpers::CellValue(propertyOrBrandGPR));
    }

    m_jit.addInByVal(gen, slowPath.get());
    addSlowPathGenerator(WTFMove(slowPath));

    blessedBooleanResult(resultRegs.payloadGPR(), node, UseChildrenCalledExplicitly);
}
1470
// HasPrivateName node: thin wrapper over the shared compileHasPrivate().
void SpeculativeJIT::compileHasPrivateName(Node* node)
{
    compileHasPrivate(node, AccessType::HasPrivateName);
}
1475
// HasPrivateBrand node: thin wrapper over the shared compileHasPrivate().
void SpeculativeJIT::compileHasPrivateBrand(Node* node)
{
    compileHasPrivate(node, AccessType::HasPrivateBrand);
}
1480
// PushWithScope: calls out to create a `with` scope on top of the current
// scope. The ObjectUse path can call the object-taking operation and skip the
// exception check; the untyped path may need toObject() and so checks.
void SpeculativeJIT::compilePushWithScope(Node* node)
{
    SpeculateCellOperand currentScope(this, node->child1());
    GPRReg currentScopeGPR = currentScope.gpr();

    GPRFlushedCallResult result(this);
    GPRReg resultGPR = result.gpr();

    auto objectEdge = node->child2();
    if (objectEdge.useKind() == ObjectUse) {
        SpeculateCellOperand object(this, objectEdge);
        GPRReg objectGPR = object.gpr();
        speculateObject(objectEdge, objectGPR);

        flushRegisters();
        callOperation(operationPushWithScopeObject, resultGPR, TrustedImmPtr::weakPointer(m_graph, m_graph.globalObjectFor(node->origin.semantic)), currentScopeGPR, objectGPR);
        // No exception check here as we did not have to call toObject().
    } else {
        ASSERT(objectEdge.useKind() == UntypedUse);
        JSValueOperand object(this, objectEdge);
        JSValueRegs objectRegs = object.jsValueRegs();

        flushRegisters();
        callOperation(operationPushWithScope, resultGPR, TrustedImmPtr::weakPointer(m_graph, m_graph.globalObjectFor(node->origin.semantic)), currentScopeGPR, objectRegs);
        m_jit.exceptionCheck();
    }

    cellResult(resultGPR, node);
}
1510
1511bool SpeculativeJIT::genericJSValueStrictEq(Node* node, bool invert)
1512{
1513 unsigned branchIndexInBlock = detectPeepHoleBranch();
1514 if (branchIndexInBlock != UINT_MAX) {
1515 Node* branchNode = m_block->at(branchIndexInBlock);
1516
1517 ASSERT(node->adjustedRefCount() == 1);
1518
1519 nonSpeculativePeepholeStrictEq(node, branchNode, invert);
1520
1521 m_indexInBlock = branchIndexInBlock;
1522 m_currentNode = branchNode;
1523
1524 return true;
1525 }
1526
1527 genericJSValueNonPeepholeStrictEq(node, invert);
1528
1529 return false;
1530}
1531
1532static const char* dataFormatString(DataFormat format)
1533{
1534 // These values correspond to the DataFormat enum.
1535 const char* strings[] = {
1536 "[ ]",
1537 "[ i]",
1538 "[ d]",
1539 "[ c]",
1540 "Err!",
1541 "Err!",
1542 "Err!",
1543 "Err!",
1544 "[J ]",
1545 "[Ji]",
1546 "[Jd]",
1547 "[Jc]",
1548 "Err!",
1549 "Err!",
1550 "Err!",
1551 "Err!",
1552 };
1553 return strings[format];
1554}
1555
// Debug dump of the register-allocator state: the GPR and FPR banks plus the
// per-virtual-register GenerationInfo table. An optional label brackets the
// output in <label>...</label>.
void SpeculativeJIT::dump(const char* label)
{
    if (label)
        dataLogF("<%s>\n", label);

    dataLogF("  gprs:\n");
    m_gprs.dump();
    dataLogF("  fprs:\n");
    m_fprs.dump();
    dataLogF("  VirtualRegisters:\n");
    for (unsigned i = 0; i < m_generationInfo.size(); ++i) {
        GenerationInfo& info = m_generationInfo[i];
        // Two 4-char tags per live entry: current register format, then spill
        // format; dead entries print [__][__].
        if (info.alive())
            dataLogF("    % 3d:%s%s", i, dataFormatString(info.registerFormat()), dataFormatString(info.spillFormat()));
        else
            dataLogF("    % 3d:[__][__]", i);
        if (info.registerFormat() == DataFormatDouble)
            dataLogF(":fpr%d\n", info.fpr());
        else if (info.registerFormat() != DataFormatNone
#if USE(JSVALUE32_64)
            && !(info.registerFormat() & DataFormatJS)
#endif
            ) {
            ASSERT(info.gpr() != InvalidGPRReg);
            dataLogF(":%s\n", GPRInfo::debugName(info.gpr()));
        } else
            dataLogF("\n");
    }
    if (label)
        dataLogF("</%s>\n", label);
}
1587
// Creates an empty temporary that owns no register; a register can be taken
// over later via adopt().
GPRTemporary::GPRTemporary()
    : m_jit(nullptr)
    , m_gpr(InvalidGPRReg)
{
}
1593
1594GPRTemporary::GPRTemporary(SpeculativeJIT* jit)
1595 : m_jit(jit)
1596 , m_gpr(InvalidGPRReg)
1597{
1598 m_gpr = m_jit->allocate();
1599}
1600
1601GPRTemporary::GPRTemporary(SpeculativeJIT* jit, GPRReg specific)
1602 : m_jit(jit)
1603 , m_gpr(InvalidGPRReg)
1604{
1605 m_gpr = m_jit->allocate(specific);
1606}
1607
#if USE(JSVALUE32_64)
// Reuse one word (tag or payload) of a JSValue operand's registers when the
// operand's node permits; doubles cannot donate a GPR, so fall back to a
// fresh allocation in that case.
GPRTemporary::GPRTemporary(
    SpeculativeJIT* jit, ReuseTag, JSValueOperand& op1, WhichValueWord which)
    : m_jit(jit)
    , m_gpr(InvalidGPRReg)
{
    if (!op1.isDouble() && m_jit->canReuse(op1.node()))
        m_gpr = m_jit->reuse(op1.gpr(which));
    else
        m_gpr = m_jit->allocate();
}
#else // USE(JSVALUE32_64)
// On 64-bit a JSValue occupies a single register, so the word selector is
// ignored and we delegate to the single-register reuse constructor.
GPRTemporary::GPRTemporary(SpeculativeJIT* jit, ReuseTag, JSValueOperand& op1, WhichValueWord)
    : GPRTemporary(jit, Reuse, op1)
{
}
#endif
1625
// Default state: no registers allocated yet.
JSValueRegsTemporary::JSValueRegsTemporary() { }
1627
// Allocates fresh registers to hold a full JSValue: one GPR on 64-bit, a
// tag/payload pair on 32-bit.
JSValueRegsTemporary::JSValueRegsTemporary(SpeculativeJIT* jit)
#if USE(JSVALUE64)
    : m_gpr(jit)
#else
    : m_payloadGPR(jit)
    , m_tagGPR(jit)
#endif
{
}
1637
#if USE(JSVALUE64)
// On 64-bit a JSValue fits in one GPR, so "reuse a word of the operand" just
// reuses its single register; the word selector is ignored.
template<typename T>
JSValueRegsTemporary::JSValueRegsTemporary(SpeculativeJIT* jit, ReuseTag, T& operand, WhichValueWord)
    : m_gpr(jit, Reuse, operand)
{
}
#else
// On 32-bit only the requested word can reuse the operand's register; the
// other word always gets a fresh allocation.
template<typename T>
JSValueRegsTemporary::JSValueRegsTemporary(SpeculativeJIT* jit, ReuseTag, T& operand, WhichValueWord resultWord)
{
    if (resultWord == PayloadWord) {
        m_payloadGPR = GPRTemporary(jit, Reuse, operand);
        m_tagGPR = GPRTemporary(jit);
    } else {
        m_payloadGPR = GPRTemporary(jit);
        m_tagGPR = GPRTemporary(jit, Reuse, operand);
    }
}
#endif
1657
#if USE(JSVALUE64)
// 64-bit: try to reuse the operand's single GPR for the result.
JSValueRegsTemporary::JSValueRegsTemporary(SpeculativeJIT* jit, ReuseTag, JSValueOperand& operand)
{
    m_gpr = GPRTemporary(jit, Reuse, operand);
}
#else
// 32-bit: reuse both the payload and tag registers, but only when the
// operand's node is reusable at all; otherwise allocate a fresh pair.
JSValueRegsTemporary::JSValueRegsTemporary(SpeculativeJIT* jit, ReuseTag, JSValueOperand& operand)
{
    if (jit->canReuse(operand.node())) {
        m_payloadGPR = GPRTemporary(jit, Reuse, operand, PayloadWord);
        m_tagGPR = GPRTemporary(jit, Reuse, operand, TagWord);
    } else {
        m_payloadGPR = GPRTemporary(jit);
        m_tagGPR = GPRTemporary(jit);
    }
}
#endif
1675
// The GPRTemporary members release their registers; nothing extra to do.
JSValueRegsTemporary::~JSValueRegsTemporary() { }
1677
// Returns the held registers as a JSValueRegs view (tag/payload on 32-bit).
JSValueRegs JSValueRegsTemporary::regs()
{
#if USE(JSVALUE64)
    return JSValueRegs(m_gpr.gpr());
#else
    return JSValueRegs(m_tagGPR.gpr(), m_payloadGPR.gpr());
#endif
}
1686
// Transfers register ownership from `other` into this (previously empty)
// temporary; `other` is left in the empty, default-constructed state.
void GPRTemporary::adopt(GPRTemporary& other)
{
    ASSERT(!m_jit);
    ASSERT(m_gpr == InvalidGPRReg);
    ASSERT(other.m_jit);
    ASSERT(other.m_gpr != InvalidGPRReg);
    m_jit = other.m_jit;
    m_gpr = other.m_gpr;
    other.m_jit = nullptr;
    other.m_gpr = InvalidGPRReg;
}
1698
// Move constructor: steals the source's register. Only m_jit is cleared on
// the source — a null m_jit is what marks a temporary as no longer owning.
FPRTemporary::FPRTemporary(FPRTemporary&& other)
{
    ASSERT(other.m_jit);
    ASSERT(other.m_fpr != InvalidFPRReg);
    m_jit = other.m_jit;
    m_fpr = other.m_fpr;

    other.m_jit = nullptr;
}
1708
1709FPRTemporary::FPRTemporary(SpeculativeJIT* jit)
1710 : m_jit(jit)
1711 , m_fpr(InvalidFPRReg)
1712{
1713 m_fpr = m_jit->fprAllocate();
1714}
1715
1716FPRTemporary::FPRTemporary(SpeculativeJIT* jit, SpeculateDoubleOperand& op1)
1717 : m_jit(jit)
1718 , m_fpr(InvalidFPRReg)
1719{
1720 if (m_jit->canReuse(op1.node()))
1721 m_fpr = m_jit->reuse(op1.fpr());
1722 else
1723 m_fpr = m_jit->fprAllocate();
1724}
1725
// Allocates an FPR, preferring to reuse one of the operands' registers. The
// order matters: try op1 alone, then op2 alone, then the case where op1 and
// op2 share the same register and both must be reusable together.
FPRTemporary::FPRTemporary(SpeculativeJIT* jit, SpeculateDoubleOperand& op1, SpeculateDoubleOperand& op2)
    : m_jit(jit)
    , m_fpr(InvalidFPRReg)
{
    if (m_jit->canReuse(op1.node()))
        m_fpr = m_jit->reuse(op1.fpr());
    else if (m_jit->canReuse(op2.node()))
        m_fpr = m_jit->reuse(op2.fpr());
    else if (m_jit->canReuse(op1.node(), op2.node()) && op1.fpr() == op2.fpr())
        m_fpr = m_jit->reuse(op1.fpr());
    else
        m_fpr = m_jit->fprAllocate();
}
1739
#if USE(JSVALUE32_64)
// 32-bit only: a JSValueOperand holding a double lives in an FPR; reuse that
// register when the node permits, otherwise allocate.
FPRTemporary::FPRTemporary(SpeculativeJIT* jit, JSValueOperand& op1)
    : m_jit(jit)
    , m_fpr(InvalidFPRReg)
{
    if (op1.isDouble() && m_jit->canReuse(op1.node()))
        m_fpr = m_jit->reuse(op1.fpr());
    else
        m_fpr = m_jit->fprAllocate();
}
#endif
1751
// Fused double compare + branch: branches straight to the successor blocks of
// the adjacent Branch node instead of materializing a boolean.
void SpeculativeJIT::compilePeepHoleDoubleBranch(Node* node, Node* branchNode, JITCompiler::DoubleCondition condition)
{
    BasicBlock* taken = branchNode->branchData()->taken.block;
    BasicBlock* notTaken = branchNode->branchData()->notTaken.block;

    // If the taken block is the fall-through block, invert the test and swap
    // targets so the emitted branch jumps to the non-fall-through block.
    if (taken == nextBlock()) {
        condition = MacroAssembler::invert(condition);
        std::swap(taken, notTaken);
    }

    SpeculateDoubleOperand op1(this, node->child1());
    SpeculateDoubleOperand op2(this, node->child2());

    branchDouble(condition, op1.fpr(), op2.fpr(), taken);
    jump(notTaken);
}
1768
1769void SpeculativeJIT::compilePeepHoleObjectEquality(Node* node, Node* branchNode)
1770{
1771 BasicBlock* taken = branchNode->branchData()->taken.block;
1772 BasicBlock* notTaken = branchNode->branchData()->notTaken.block;
1773
1774 MacroAssembler::RelationalCondition condition = MacroAssembler::Equal;
1775
1776 if (taken == nextBlock()) {
1777 condition = MacroAssembler::NotEqual;
1778 BasicBlock* tmp = taken;
1779 taken = notTaken;
1780 notTaken = tmp;
1781 }
1782
1783 SpeculateCellOperand op1(this, node->child1());
1784 SpeculateCellOperand op2(this, node->child2());
1785
1786 GPRReg op1GPR = op1.gpr();
1787 GPRReg op2GPR = op2.gpr();
1788
1789 if (masqueradesAsUndefinedWatchpointIsStillValid()) {
1790 if (m_state.forNode(node->child1()).m_type & ~SpecObject) {
1791 speculationCheck(
1792 BadType, JSValueSource::unboxedCell(op1GPR), node->child1(), m_jit.branchIfNotObject(op1GPR));
1793 }
1794 if (m_state.forNode(node->child2()).m_type & ~SpecObject) {
1795 speculationCheck(
1796 BadType, JSValueSource::unboxedCell(op2GPR), node->child2(), m_jit.branchIfNotObject(op2GPR));
1797 }
1798 } else {
1799 if (m_state.forNode(node->child1()).m_type & ~SpecObject) {
1800 speculationCheck(
1801 BadType, JSValueSource::unboxedCell(op1GPR), node->child1(),
1802 m_jit.branchIfNotObject(op1GPR));
1803 }
1804 speculationCheck(BadType, JSValueSource::unboxedCell(op1GPR), node->child1(),
1805 m_jit.branchTest8(
1806 MacroAssembler::NonZero,
1807 MacroAssembler::Address(op1GPR, JSCell::typeInfoFlagsOffset()),
1808 MacroAssembler::TrustedImm32(MasqueradesAsUndefined)));
1809
1810 if (m_state.forNode(node->child2()).m_type & ~SpecObject) {
1811 speculationCheck(
1812 BadType, JSValueSource::unboxedCell(op2GPR), node->child2(),
1813 m_jit.branchIfNotObject(op2GPR));
1814 }
1815 speculationCheck(BadType, JSValueSource::unboxedCell(op2GPR), node->child2(),
1816 m_jit.branchTest8(
1817 MacroAssembler::NonZero,
1818 MacroAssembler::Address(op2GPR, JSCell::typeInfoFlagsOffset()),
1819 MacroAssembler::TrustedImm32(MasqueradesAsUndefined)));
1820 }
1821
1822 branchPtr(condition, op1GPR, op2GPR, taken);
1823 jump(notTaken);
1824}
1825
1826void SpeculativeJIT::compilePeepHoleBooleanBranch(Node* node, Node* branchNode, JITCompiler::RelationalCondition condition)
1827{
1828 BasicBlock* taken = branchNode->branchData()->taken.block;
1829 BasicBlock* notTaken = branchNode->branchData()->notTaken.block;
1830
1831 // The branch instruction will branch to the taken block.
1832 // If taken is next, switch taken with notTaken & invert the branch condition so we can fall through.
1833 if (taken == nextBlock()) {
1834 condition = JITCompiler::invert(condition);
1835 BasicBlock* tmp = taken;
1836 taken = notTaken;
1837 notTaken = tmp;
1838 }
1839
1840 if (node->child1()->isInt32Constant()) {
1841 int32_t imm = node->child1()->asInt32();
1842 SpeculateBooleanOperand op2(this, node->child2());
1843 branch32(condition, JITCompiler::Imm32(imm), op2.gpr(), taken);
1844 } else if (node->child2()->isInt32Constant()) {
1845 SpeculateBooleanOperand op1(this, node->child1());
1846 int32_t imm = node->child2()->asInt32();
1847 branch32(condition, op1.gpr(), JITCompiler::Imm32(imm), taken);
1848 } else {
1849 SpeculateBooleanOperand op1(this, node->child1());
1850 SpeculateBooleanOperand op2(this, node->child2());
1851 branch32(condition, op1.gpr(), op2.gpr(), taken);
1852 }
1853
1854 jump(notTaken);
1855}
1856
// Emits code for StringSlice: extracts the [start, end) substring of a
// JSString. Fast paths cover the empty result and single-character results
// (served from the VM's single-character string table); everything else —
// including rope strings — goes to a slow-path call.
void SpeculativeJIT::compileStringSlice(Node* node)
{
    SpeculateCellOperand string(this, node->child1());

    GPRReg stringGPR = string.gpr();

    speculateString(node->child1(), stringGPR);

    SpeculateInt32Operand start(this, node->child2());
    GPRReg startGPR = start.gpr();

    // child3 (the end index) is optional; absent means "slice to end of string".
    std::optional<SpeculateInt32Operand> end;
    std::optional<GPRReg> endGPR;
    if (node->child3()) {
        end.emplace(this, node->child3());
        endGPR.emplace(end->gpr());
    }

    GPRTemporary temp(this);
    GPRTemporary temp2(this);
    GPRTemporary startIndex(this);

    GPRReg tempGPR = temp.gpr();
    GPRReg temp2GPR = temp2.gpr();
    GPRReg startIndexGPR = startIndex.gpr();

    // Rope strings have no flat character buffer; they are handled entirely on
    // the slow path (the isRope jump is linked to operationStringSlice below).
    m_jit.loadPtr(CCallHelpers::Address(stringGPR, JSString::offsetOfValue()), tempGPR);
    auto isRope = m_jit.branchIfRopeStringImpl(tempGPR);
    {
        m_jit.load32(MacroAssembler::Address(tempGPR, StringImpl::lengthMemoryOffset()), temp2GPR);

        // Normalize the start (and end, if present) index against the length in
        // temp2GPR; presumably this also handles negative indices — confirm
        // against emitPopulateSliceIndex.
        emitPopulateSliceIndex(node->child2(), startGPR, temp2GPR, startIndexGPR);

        if (node->child3())
            emitPopulateSliceIndex(node->child3(), endGPR.value(), temp2GPR, tempGPR);
        else
            m_jit.move(temp2GPR, tempGPR);
    }

    CCallHelpers::JumpList doneCases;
    CCallHelpers::JumpList slowCases;

    VM& vm = this->vm();
    // start >= end: the result is the shared empty string.
    auto nonEmptyCase = m_jit.branch32(MacroAssembler::Below, startIndexGPR, tempGPR);
    m_jit.move(TrustedImmPtr::weakPointer(m_jit.graph(), jsEmptyString(vm)), tempGPR);
    doneCases.append(m_jit.jump());

    nonEmptyCase.link(&m_jit);
    m_jit.sub32(startIndexGPR, tempGPR); // the size of the sliced string.
    // Only length-1 slices are handled inline; longer slices call
    // operationStringSubstr.
    slowCases.append(m_jit.branch32(MacroAssembler::NotEqual, tempGPR, TrustedImm32(1)));

    // Refill StringImpl* here.
    m_jit.loadPtr(MacroAssembler::Address(stringGPR, JSString::offsetOfValue()), temp2GPR);
    m_jit.loadPtr(MacroAssembler::Address(temp2GPR, StringImpl::dataOffset()), tempGPR);

    // Load the character into scratchReg
    m_jit.zeroExtend32ToWord(startIndexGPR, startIndexGPR);
    auto is16Bit = m_jit.branchTest32(MacroAssembler::Zero, MacroAssembler::Address(temp2GPR, StringImpl::flagsOffset()), TrustedImm32(StringImpl::flagIs8Bit()));

    m_jit.load8(MacroAssembler::BaseIndex(tempGPR, startIndexGPR, MacroAssembler::TimesOne, 0), tempGPR);
    auto cont8Bit = m_jit.jump();

    is16Bit.link(&m_jit);
    m_jit.load16(MacroAssembler::BaseIndex(tempGPR, startIndexGPR, MacroAssembler::TimesTwo, 0), tempGPR);

    // Characters beyond the single-character string table need a runtime call.
    auto bigCharacter = m_jit.branch32(MacroAssembler::Above, tempGPR, TrustedImm32(maxSingleCharacterString));

    // 8 bit string values don't need the isASCII check.
    cont8Bit.link(&m_jit);

    // Index into the table of JSString* (shift by pointer size: 4 or 8 bytes).
    m_jit.lshift32(MacroAssembler::TrustedImm32(sizeof(void*) == 4 ? 2 : 3), tempGPR);
    m_jit.addPtr(TrustedImmPtr(vm.smallStrings.singleCharacterStrings()), tempGPR);
    m_jit.loadPtr(tempGPR, tempGPR);

    addSlowPathGenerator(slowPathCall(bigCharacter, this, operationSingleCharacterString, tempGPR, &vm, tempGPR));

    addSlowPathGenerator(slowPathCall(slowCases, this, operationStringSubstr, tempGPR, TrustedImmPtr::weakPointer(m_graph, m_graph.globalObjectFor(node->origin.semantic)), stringGPR, startIndexGPR, tempGPR));

    // For ropes, fall back to the generic slice operation; when no end index
    // was supplied, pass INT32_MAX to mean "to the end".
    if (endGPR)
        addSlowPathGenerator(slowPathCall(isRope, this, operationStringSlice, tempGPR, TrustedImmPtr::weakPointer(m_graph, m_graph.globalObjectFor(node->origin.semantic)), stringGPR, startGPR, *endGPR));
    else
        addSlowPathGenerator(slowPathCall(isRope, this, operationStringSlice, tempGPR, TrustedImmPtr::weakPointer(m_graph, m_graph.globalObjectFor(node->origin.semantic)), stringGPR, startGPR, TrustedImm32(std::numeric_limits<int32_t>::max())));

    doneCases.link(&m_jit);
    cellResult(tempGPR, node);
}
1943
// Emits code for ToLowerCase. The fast path scans a flat 8-bit string and
// succeeds only if no character needs lowering (all chars are ASCII and none
// is 'A'..'Z'), in which case the original string is the result. Otherwise
// operationToLowerCase is called with the index at which scanning stopped.
void SpeculativeJIT::compileToLowerCase(Node* node)
{
    ASSERT(node->op() == ToLowerCase);
    SpeculateCellOperand string(this, node->child1());
    GPRTemporary temp(this);
    GPRTemporary index(this);
    GPRTemporary charReg(this);
    GPRTemporary length(this);

    GPRReg stringGPR = string.gpr();
    GPRReg tempGPR = temp.gpr();
    GPRReg indexGPR = index.gpr();
    GPRReg charGPR = charReg.gpr();
    GPRReg lengthGPR = length.gpr();

    speculateString(node->child1(), stringGPR);

    CCallHelpers::JumpList slowPath;

    // Start scanning at index 0 (nullptr is just a zero immediate here).
    m_jit.move(TrustedImmPtr(nullptr), indexGPR);

    // Rope strings and 16-bit strings go straight to the slow path.
    m_jit.loadPtr(MacroAssembler::Address(stringGPR, JSString::offsetOfValue()), tempGPR);
    slowPath.append(m_jit.branchIfRopeStringImpl(tempGPR));
    slowPath.append(m_jit.branchTest32(
        MacroAssembler::Zero, MacroAssembler::Address(tempGPR, StringImpl::flagsOffset()),
        MacroAssembler::TrustedImm32(StringImpl::flagIs8Bit())));
    m_jit.load32(MacroAssembler::Address(tempGPR, StringImpl::lengthMemoryOffset()), lengthGPR);
    m_jit.loadPtr(MacroAssembler::Address(tempGPR, StringImpl::dataOffset()), tempGPR);

    auto loopStart = m_jit.label();
    auto loopDone = m_jit.branch32(CCallHelpers::AboveOrEqual, indexGPR, lengthGPR);
    m_jit.load8(MacroAssembler::BaseIndex(tempGPR, indexGPR, MacroAssembler::TimesOne), charGPR);
    // Non-ASCII character: bail to the slow path.
    slowPath.append(m_jit.branchTest32(CCallHelpers::NonZero, charGPR, TrustedImm32(~0x7F)));
    // Uppercase ASCII letter ('A'..'Z'): needs lowering, bail to the slow path.
    m_jit.sub32(TrustedImm32('A'), charGPR);
    slowPath.append(m_jit.branch32(CCallHelpers::BelowOrEqual, charGPR, TrustedImm32('Z' - 'A')));

    m_jit.add32(TrustedImm32(1), indexGPR);
    m_jit.jump().linkTo(loopStart, &m_jit);

    // Slow path: call out, passing indexGPR so the runtime can resume where
    // the inline scan stopped.
    slowPath.link(&m_jit);
    silentSpillAllRegisters(lengthGPR);
    callOperation(operationToLowerCase, lengthGPR, TrustedImmPtr::weakPointer(m_graph, m_graph.globalObjectFor(node->origin.semantic)), stringGPR, indexGPR);
    silentFillAllRegisters();
    m_jit.exceptionCheck();
    auto done = m_jit.jump();

    // Fast path completed: the string is already lowercase, return it as-is.
    loopDone.link(&m_jit);
    m_jit.move(stringGPR, lengthGPR);

    done.link(&m_jit);
    cellResult(lengthGPR, node);
}
1996
1997void SpeculativeJIT::compilePeepHoleInt32Branch(Node* node, Node* branchNode, JITCompiler::RelationalCondition condition)
1998{
1999 BasicBlock* taken = branchNode->branchData()->taken.block;
2000 BasicBlock* notTaken = branchNode->branchData()->notTaken.block;
2001
2002 // The branch instruction will branch to the taken block.
2003 // If taken is next, switch taken with notTaken & invert the branch condition so we can fall through.
2004 if (taken == nextBlock()) {
2005 condition = JITCompiler::invert(condition);
2006 BasicBlock* tmp = taken;
2007 taken = notTaken;
2008 notTaken = tmp;
2009 }
2010
2011 if (node->child1()->isInt32Constant()) {
2012 int32_t imm = node->child1()->asInt32();
2013 SpeculateInt32Operand op2(this, node->child2());
2014 branch32(condition, JITCompiler::Imm32(imm), op2.gpr(), taken);
2015 } else if (node->child2()->isInt32Constant()) {
2016 SpeculateInt32Operand op1(this, node->child1());
2017 int32_t imm = node->child2()->asInt32();
2018 branch32(condition, op1.gpr(), JITCompiler::Imm32(imm), taken);
2019 } else {
2020 SpeculateInt32Operand op1(this, node->child1());
2021 SpeculateInt32Operand op2(this, node->child2());
2022 branch32(condition, op1.gpr(), op2.gpr(), taken);
2023 }
2024
2025 jump(notTaken);
2026}
2027
// Returns true if the compare is fused with a subsequent branch.
// Dispatches on the compare node's use kinds to the matching peephole
// compiler; falls back to the generic JSValue peephole (which also returns
// true) or declines fusion entirely (returns false) for string compares.
bool SpeculativeJIT::compilePeepHoleBranch(Node* node, MacroAssembler::RelationalCondition condition, MacroAssembler::DoubleCondition doubleCondition, S_JITOperation_GJJ operation)
{
    // Fused compare & branch.
    unsigned branchIndexInBlock = detectPeepHoleBranch();
    if (branchIndexInBlock != UINT_MAX) {
        Node* branchNode = m_block->at(branchIndexInBlock);

        // detectPeepHoleBranch currently only permits the branch to be the very next node,
        // so can be no intervening nodes to also reference the compare.
        ASSERT(node->adjustedRefCount() == 1);

        if (node->isBinaryUseKind(Int32Use))
            compilePeepHoleInt32Branch(node, branchNode, condition);
#if USE(BIGINT32)
        else if (node->isBinaryUseKind(BigInt32Use))
            compilePeepHoleBigInt32Branch(node, branchNode, condition);
#endif
#if USE(JSVALUE64)
        else if (node->isBinaryUseKind(Int52RepUse))
            compilePeepHoleInt52Branch(node, branchNode, condition);
#endif // USE(JSVALUE64)
        else if (node->isBinaryUseKind(StringUse) || node->isBinaryUseKind(StringIdentUse)) {
            // Use non-peephole comparison, for now.
            return false;
        } else if (node->isBinaryUseKind(DoubleRepUse))
            compilePeepHoleDoubleBranch(node, branchNode, doubleCondition);
        else if (node->op() == CompareEq) {
            if (node->isBinaryUseKind(BooleanUse))
                compilePeepHoleBooleanBranch(node, branchNode, condition);
            else if (node->isBinaryUseKind(SymbolUse))
                compilePeepHoleSymbolEquality(node, branchNode);
            else if (node->isBinaryUseKind(ObjectUse))
                compilePeepHoleObjectEquality(node, branchNode);
            else if (node->isBinaryUseKind(ObjectUse, ObjectOrOtherUse))
                compilePeepHoleObjectToObjectOrOtherEquality(node->child1(), node->child2(), branchNode);
            else if (node->isBinaryUseKind(ObjectOrOtherUse, ObjectUse))
                compilePeepHoleObjectToObjectOrOtherEquality(node->child2(), node->child1(), branchNode);
            else if (!needsTypeCheck(node->child1(), SpecOther))
                nonSpeculativePeepholeBranchNullOrUndefined(node->child2(), branchNode);
            else if (!needsTypeCheck(node->child2(), SpecOther))
                nonSpeculativePeepholeBranchNullOrUndefined(node->child1(), branchNode);
            else {
                // Generic path already consumes the children and advances; the
                // bookkeeping below must be skipped.
                genericJSValuePeepholeBranch(node, branchNode, condition, operation);
                return true;
            }
        } else {
            genericJSValuePeepholeBranch(node, branchNode, condition, operation);
            return true;
        }

        // Consume the compare's children and skip ahead to the fused branch so
        // the main loop does not compile it a second time.
        use(node->child1());
        use(node->child2());
        m_indexInBlock = branchIndexInBlock;
        m_currentNode = branchNode;
        return true;
    }
    return false;
}
2087
2088void SpeculativeJIT::noticeOSRBirth(Node* node)
2089{
2090 if (!node->hasVirtualRegister())
2091 return;
2092
2093 VirtualRegister virtualRegister = node->virtualRegister();
2094 GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
2095
2096 info.noticeOSRBirth(*m_stream, node, virtualRegister);
2097}
2098
// Emits code for LoopHint. Normally a no-op; under the
// returnEarlyFromInfiniteLoopsForFuzzing option (and when every baseline code
// block up the inline stack opts in), it counts executions of this loop hint
// and forces an early function return once the counter reaches the configured
// limit, so fuzzers can escape infinite loops.
void SpeculativeJIT::compileLoopHint(Node* node)
{
    if (UNLIKELY(Options::returnEarlyFromInfiniteLoopsForFuzzing())) {
        bool emitEarlyReturn = true;
        node->origin.semantic.walkUpInlineStack([&](CodeOrigin origin) {
            CodeBlock* baselineCodeBlock = m_jit.graph().baselineCodeBlockFor(origin);
            if (!baselineCodeBlock->loopHintsAreEligibleForFuzzingEarlyReturn())
                emitEarlyReturn = false;
        });
        if (emitEarlyReturn) {
            CodeBlock* baselineCodeBlock = m_jit.graph().baselineCodeBlockFor(node->origin.semantic);
            BytecodeIndex bytecodeIndex = node->origin.semantic.bytecodeIndex();
            const Instruction* instruction = baselineCodeBlock->instructions().at(bytecodeIndex.offset()).ptr();

            // Per-loop-hint execution counter keyed by the baseline instruction.
            uintptr_t* ptr = vm().getLoopHintExecutionCounter(instruction);
            // regT0 is preserved around the counter check on both paths below.
            m_jit.pushToSave(GPRInfo::regT0);
            m_jit.loadPtr(ptr, GPRInfo::regT0);
            auto skipEarlyReturn = m_jit.branchPtr(CCallHelpers::Below, GPRInfo::regT0, CCallHelpers::TrustedImmPtr(Options::earlyReturnFromInfiniteLoopsLimit()));

            if constexpr (validateDFGDoesGC) {
                if (Options::validateDoesGC()) {
                    // We need to mock what a Return does: claims to GC.
                    m_jit.move(CCallHelpers::TrustedImmPtr(vm().heap.addressOfDoesGC()), GPRInfo::regT0);
                    m_jit.store32(CCallHelpers::TrustedImm32(DoesGCCheck::encode(true, DoesGCCheck::Special::Uninitialized)), CCallHelpers::Address(GPRInfo::regT0));
                }
            }

            // Early-return path: restore regT0, return the global object.
            m_jit.popToRestore(GPRInfo::regT0);
#if USE(JSVALUE64)
            JSValueRegs resultRegs(GPRInfo::returnValueGPR);
#else
            JSValueRegs resultRegs(GPRInfo::returnValueGPR2, GPRInfo::returnValueGPR);
#endif
            m_jit.moveValue(baselineCodeBlock->globalObject(), resultRegs);
            m_jit.emitRestoreCalleeSaves();
            m_jit.emitFunctionEpilogue();
            m_jit.ret();

            // Common path: bump the counter and continue with the loop.
            skipEarlyReturn.link(&m_jit);
            m_jit.addPtr(CCallHelpers::TrustedImm32(1), GPRInfo::regT0);
            m_jit.storePtr(GPRInfo::regT0, ptr);
            m_jit.popToRestore(GPRInfo::regT0);
        }
    }

    noResult(node);
}
2146
2147void SpeculativeJIT::compileMovHint(Node* node)
2148{
2149 ASSERT(node->containsMovHint());
2150
2151 Node* child = node->child1().node();
2152 noticeOSRBirth(child);
2153
2154 m_stream->appendAndLog(VariableEvent::movHint(MinifiedID(child), node->unlinkedOperand()));
2155}
2156
2157void SpeculativeJIT::compileCheckDetached(Node* node)
2158{
2159 SpeculateCellOperand base(this, node->child1());
2160 GPRReg baseReg = base.gpr();
2161
2162 speculationCheck(
2163 BadIndexingType, JSValueSource::unboxedCell(baseReg), node->child1(),
2164 m_jit.branchTestPtr(MacroAssembler::Zero, MacroAssembler::Address(baseReg, JSArrayBufferView::offsetOfVector())));
2165
2166 noResult(node);
2167}
2168
// Abandons code generation for the current block, planting an abort trap in
// its place so any jump into the bailed code crashes deterministically.
void SpeculativeJIT::bail(AbortReason reason)
{
    if (verboseCompilationEnabled())
        dataLog("Bailing compilation.\n");
    // NOTE(review): deliberately set back to true — presumably so subsequent
    // bookkeeping after the planted abort doesn't re-trigger failure handling;
    // confirm before changing.
    m_compileOkay = true;
    m_jit.abortWithReason(reason, m_lastGeneratedNode);
    clearGenerationInfo();
}
2177
// Generates machine code for m_block: records the block head label, replays
// the abstract interpreter over each node in order, and compiles every node,
// bailing out if the abstract state becomes contradictory or a node fails to
// compile.
void SpeculativeJIT::compileCurrentBlock()
{
    ASSERT(m_compileOkay);

    if (!m_block)
        return;

    ASSERT(m_block->isReachable);

    // Record where this block starts so branches can be linked to it later.
    m_jit.blockHeads()[m_block->index] = m_jit.label();

    if (!m_block->intersectionOfCFAHasVisited) {
        // Don't generate code for basic blocks that are unreachable according to CFA.
        // But to be sure that nobody has generated a jump to this block, drop in a
        // breakpoint here.
        m_jit.abortWithReason(DFGUnreachableBasicBlock);
        return;
    }

    if (m_block->isCatchEntrypoint) {
        // Catch entrypoints are entered from the OSR machinery, so the frame
        // setup a normal prologue would do must be re-established here.
        m_jit.addPtr(CCallHelpers::TrustedImm32(-(m_jit.graph().frameRegisterCount() * sizeof(Register))), GPRInfo::callFrameRegister, CCallHelpers::stackPointerRegister);
        m_jit.emitSaveCalleeSaves();
        m_jit.emitMaterializeTagCheckRegisters();
        m_jit.emitPutToCallFrameHeader(m_jit.codeBlock(), CallFrameSlot::codeBlock);
    }

    m_stream->appendAndLog(VariableEvent::reset());

    m_jit.jitAssertHasValidCallFrame();
    m_jit.jitAssertTagsInPlace();
    m_jit.jitAssertArgumentCountSane();

    m_state.reset();
    m_state.beginBasicBlock(m_block);

    // Record the flush format of every live variable at the head of the block
    // for the OSR exit machinery.
    for (size_t i = m_block->variablesAtHead.size(); i--;) {
        Operand operand = m_block->variablesAtHead.operandForIndex(i);
        Node* node = m_block->variablesAtHead[i];
        if (!node)
            continue; // No need to record dead SetLocal's.

        VariableAccessData* variable = node->variableAccessData();
        DataFormat format;
        if (!node->refCount())
            continue; // No need to record dead SetLocal's.
        format = dataFormatFor(variable->flushFormat());
        DFG_ASSERT(m_jit.graph(), node, !operand.isArgument() || operand.virtualRegister().toArgument() >= 0);
        m_stream->appendAndLog(VariableEvent::setLocal(operand, variable->machineLocal(), format));
    }

    m_origin = NodeOrigin();

    if (Options::validateDFGClobberize()) {
        // Validation mode: verify the didEnterVM flag is consistent with what
        // clobberize() claims about the predecessors' terminals.
        bool clobberedWorld = m_block->predecessors.isEmpty() || m_block->isOSRTarget || m_block->isCatchEntrypoint;
        auto validateClobberize = [&] () {
            clobberedWorld = true;
        };

        for (auto* predecessor : m_block->predecessors) {
            Node* terminal = predecessor->terminal();
            // We sometimes fuse compare followed by branch.
            if (terminal->isBranch())
                terminal = terminal->child1().node();
            clobberize(m_graph, terminal, [] (auto...) { }, [] (auto...) { }, [] (auto...) { }, validateClobberize);
        }

        if (!clobberedWorld) {
            auto ok = m_jit.branchTest8(MacroAssembler::Zero, MacroAssembler::AbsoluteAddress(&vm().didEnterVM));
            m_jit.breakpoint();
            ok.link(&m_jit);
        } else
            m_jit.store8(TrustedImm32(0), &vm().didEnterVM);
    }

    for (m_indexInBlock = 0; m_indexInBlock < m_block->size(); ++m_indexInBlock) {
        m_currentNode = m_block->at(m_indexInBlock);

        // We may have hit a contradiction that the CFA was aware of but that the JIT
        // didn't cause directly.
        if (!m_state.isValid()) {
            bail(DFGBailedAtTopOfBlock);
            return;
        }

        m_interpreter.startExecuting();
        m_interpreter.executeKnownEdgeTypes(m_currentNode);
        m_jit.setForNode(m_currentNode);
        m_origin = m_currentNode->origin;
        m_lastGeneratedNode = m_currentNode->op();

        ASSERT(m_currentNode->shouldGenerate());

        if (verboseCompilationEnabled())
            dataLogLn("SpeculativeJIT generating Node @", (int)m_currentNode->index(), " (", m_currentNode->origin.semantic.bytecodeIndex().offset(), ") at JIT offset 0x", m_jit.debugOffset());

        if (Options::validateDFGExceptionHandling() && (mayExit(m_jit.graph(), m_currentNode) != DoesNotExit || m_currentNode->isTerminal()))
            m_jit.jitReleaseAssertNoException(m_jit.vm());

        m_jit.pcToCodeOriginMapBuilder().appendItem(m_jit.labelIgnoringWatchpoints(), m_origin.semantic);

        // Same didEnterVM validation as above, but against the previous node in
        // this block instead of the predecessors' terminals.
        if (m_indexInBlock && Options::validateDFGClobberize()) {
            bool clobberedWorld = false;
            auto validateClobberize = [&] () {
                clobberedWorld = true;
            };

            clobberize(m_graph, m_block->at(m_indexInBlock - 1), [] (auto...) { }, [] (auto...) { }, [] (auto...) { }, validateClobberize);
            if (!clobberedWorld) {
                auto ok = m_jit.branchTest8(MacroAssembler::Zero, MacroAssembler::AbsoluteAddress(&vm().didEnterVM));
                m_jit.breakpoint();
                ok.link(&m_jit);
            } else
                m_jit.store8(TrustedImm32(0), &vm().didEnterVM);
        }

        std::optional<JITSizeStatistics::Marker> sizeMarker;
        if (UNLIKELY(Options::dumpDFGJITSizeStatistics())) {
            String id = makeString("DFG_fast_", m_graph.opName(m_currentNode->op()));
            sizeMarker = vm().jitSizeStatistics->markStart(id, m_jit);
        }

        compile(m_currentNode);

        if (UNLIKELY(sizeMarker))
            vm().jitSizeStatistics->markEnd(WTFMove(*sizeMarker), m_jit);

        if (belongsInMinifiedGraph(m_currentNode->op()))
            m_minifiedGraph->append(MinifiedNode::fromNode(m_currentNode));

#if ENABLE(DFG_REGISTER_ALLOCATION_VALIDATION)
        m_jit.clearRegisterAllocationOffsets();
#endif

        if (!m_compileOkay) {
            bail(DFGBailedAtEndOfNode);
            return;
        }

        // Make sure that the abstract state is rematerialized for the next node.
        m_interpreter.executeEffects(m_indexInBlock);
    }

    // Perform the most basic verification that children have been used correctly.
    if (ASSERT_ENABLED) {
        for (auto& info : m_generationInfo)
            RELEASE_ASSERT(!info.alive());
    }
}
2326
// If we are making type predictions about our arguments then
// we need to check that they are correct on function entry.
// Each live argument whose flush format is narrower than FlushedJSValue gets a
// speculation check that OSR-exits (BadType) when the incoming value does not
// match the predicted format. Tag encoding differs between the 64-bit and
// 32-bit value representations, hence the two switch bodies.
void SpeculativeJIT::checkArgumentTypes()
{
    ASSERT(!m_currentNode);
    m_origin = NodeOrigin(CodeOrigin(BytecodeIndex(0)), CodeOrigin(BytecodeIndex(0)), true);

    auto& arguments = m_jit.graph().m_rootToArguments.find(m_jit.graph().block(0))->value;
    for (unsigned i = 0; i < m_jit.codeBlock()->numParameters(); ++i) {
        Node* node = arguments[i];
        if (!node) {
            // The argument is dead. We don't do any checks for such arguments.
            continue;
        }

        ASSERT(node->op() == SetArgumentDefinitely);
        ASSERT(node->shouldGenerate());

        VariableAccessData* variableAccessData = node->variableAccessData();
        FlushFormat format = variableAccessData->flushFormat();

        // FlushedJSValue accepts anything; nothing to check.
        if (format == FlushedJSValue)
            continue;

        VirtualRegister virtualRegister = variableAccessData->operand().virtualRegister();
        ASSERT(virtualRegister.isArgument());

        JSValueSource valueSource = JSValueSource(JITCompiler::addressFor(virtualRegister));

#if USE(JSVALUE64)
        switch (format) {
        case FlushedInt32: {
            // Boxed int32s compare >= numberTag; anything below is not an int32.
            speculationCheck(BadType, valueSource, node, m_jit.branch64(MacroAssembler::Below, JITCompiler::addressFor(virtualRegister), GPRInfo::numberTagRegister));
            break;
        }
        case FlushedBoolean: {
            // XOR with ValueFalse maps true/false to 1/0; any other bit set
            // means the value was not a boolean.
            GPRTemporary temp(this);
            m_jit.load64(JITCompiler::addressFor(virtualRegister), temp.gpr());
            m_jit.xor64(TrustedImm32(JSValue::ValueFalse), temp.gpr());
            speculationCheck(BadType, valueSource, node, m_jit.branchTest64(MacroAssembler::NonZero, temp.gpr(), TrustedImm32(static_cast<int32_t>(~1))));
            break;
        }
        case FlushedCell: {
            // Cells have no bits in common with the not-cell mask.
            speculationCheck(BadType, valueSource, node, m_jit.branchTest64(MacroAssembler::NonZero, JITCompiler::addressFor(virtualRegister), GPRInfo::notCellMaskRegister));
            break;
        }
        default:
            RELEASE_ASSERT_NOT_REACHED();
            break;
        }
#else
        // 32-bit representation stores an explicit tag word; compare it directly.
        switch (format) {
        case FlushedInt32: {
            speculationCheck(BadType, valueSource, node, m_jit.branch32(MacroAssembler::NotEqual, JITCompiler::tagFor(virtualRegister), TrustedImm32(JSValue::Int32Tag)));
            break;
        }
        case FlushedBoolean: {
            speculationCheck(BadType, valueSource, node, m_jit.branch32(MacroAssembler::NotEqual, JITCompiler::tagFor(virtualRegister), TrustedImm32(JSValue::BooleanTag)));
            break;
        }
        case FlushedCell: {
            speculationCheck(BadType, valueSource, node, m_jit.branch32(MacroAssembler::NotEqual, JITCompiler::tagFor(virtualRegister), TrustedImm32(JSValue::CellTag)));
            break;
        }
        default:
            RELEASE_ASSERT_NOT_REACHED();
            break;
        }
#endif
    }

    m_origin = NodeOrigin();
}
2400
// Top-level driver: emit the argument type checks, generate code for every
// basic block in index order, then link the inter-block branches.
bool SpeculativeJIT::compile()
{
    checkArgumentTypes();

    ASSERT(!m_currentNode);
    for (BlockIndex blockIndex = 0; blockIndex < m_jit.graph().numBlocks(); ++blockIndex) {
        m_jit.setForBlockIndex(blockIndex);
        m_block = m_jit.graph().block(blockIndex);
        compileCurrentBlock();
    }
    linkBranches();
    return true;
}
2414
2415void SpeculativeJIT::createOSREntries()
2416{
2417 for (BlockIndex blockIndex = 0; blockIndex < m_jit.graph().numBlocks(); ++blockIndex) {
2418 BasicBlock* block = m_jit.graph().block(blockIndex);
2419 if (!block)
2420 continue;
2421 if (block->isOSRTarget || block->isCatchEntrypoint) {
2422 // Currently we don't have OSR entry trampolines. We could add them
2423 // here if need be.
2424 m_osrEntryHeads.append(m_jit.blockHeads()[blockIndex]);
2425 }
2426 }
2427}
2428
// After code is linked, walks the blocks in the same order createOSREntries
// did (the shared osrEntryIndex keeps the two in lock-step), notifying the
// JIT of each catch entrypoint (with its per-argument flush formats) and OSR
// target, then finalizes the collected entrypoints on the JITCode object.
void SpeculativeJIT::linkOSREntries(LinkBuffer& linkBuffer)
{
    unsigned osrEntryIndex = 0;
    for (BlockIndex blockIndex = 0; blockIndex < m_jit.graph().numBlocks(); ++blockIndex) {
        BasicBlock* block = m_jit.graph().block(blockIndex);
        if (!block)
            continue;
        if (!block->isOSRTarget && !block->isCatchEntrypoint)
            continue;
        if (block->isCatchEntrypoint) {
            // Catch entrypoints need the flush format of each argument so the
            // OSR machinery can materialize the frame correctly.
            auto& argumentsVector = m_jit.graph().m_rootToArguments.find(block)->value;
            Vector<FlushFormat> argumentFormats;
            argumentFormats.reserveInitialCapacity(argumentsVector.size());
            for (Node* setArgument : argumentsVector) {
                if (setArgument) {
                    FlushFormat flushFormat = setArgument->variableAccessData()->flushFormat();
                    ASSERT(flushFormat == FlushedInt32 || flushFormat == FlushedCell || flushFormat == FlushedBoolean || flushFormat == FlushedJSValue);
                    argumentFormats.uncheckedAppend(flushFormat);
                } else
                    argumentFormats.uncheckedAppend(DeadFlush); // Dead argument: no value to restore.
            }
            m_jit.noticeCatchEntrypoint(*block, m_osrEntryHeads[osrEntryIndex++], linkBuffer, WTFMove(argumentFormats));
        } else {
            ASSERT(block->isOSRTarget);
            m_jit.noticeOSREntry(*block, m_osrEntryHeads[osrEntryIndex++], linkBuffer);
        }
    }

    m_jit.jitCode()->finalizeOSREntrypoints(WTFMove(m_jit.m_osrEntry));
    m_jit.jitCode()->common.finalizeCatchEntrypoints(WTFMove(m_jit.graph().m_catchEntrypoints));

    // Every collected head must have been consumed exactly once.
    ASSERT(osrEntryIndex == m_osrEntryHeads.size());

    if (verboseCompilationEnabled()) {
        DumpContext dumpContext;
        dataLog("OSR Entries:\n");
        for (OSREntryData& entryData : m_jit.jitCode()->m_osrEntry)
            dataLog("    ", inContext(entryData, &dumpContext), "\n");
        if (!dumpContext.isEmpty())
            dumpContext.dump(WTF::dataFile());
    }
}
2471
2472void SpeculativeJIT::compileCheckTraps(Node* node)
2473{
2474 ASSERT(Options::usePollingTraps());
2475 GPRTemporary unused(this);
2476 GPRReg unusedGPR = unused.gpr();
2477
2478 JITCompiler::Jump needTrapHandling = m_jit.branchTest32(JITCompiler::NonZero,
2479 JITCompiler::AbsoluteAddress(m_jit.vm().traps().trapBitsAddress()),
2480 TrustedImm32(VMTraps::AsyncEvents));
2481
2482 addSlowPathGenerator(slowPathCall(needTrapHandling, this, operationHandleTraps, unusedGPR, TrustedImmPtr::weakPointer(m_graph, m_graph.globalObjectFor(node->origin.semantic))));
2483 noResult(node);
2484}
2485
2486void SpeculativeJIT::compileDoublePutByVal(Node* node, SpeculateCellOperand& base, SpeculateStrictInt32Operand& property)
2487{
2488 Edge child3 = m_jit.graph().varArgChild(node, 2);
2489 Edge child4 = m_jit.graph().varArgChild(node, 3);
2490
2491 ArrayMode arrayMode = node->arrayMode();
2492
2493 GPRReg baseReg = base.gpr();
2494 GPRReg propertyReg = property.gpr();
2495
2496 SpeculateDoubleOperand value(this, child3);
2497
2498 FPRReg valueReg = value.fpr();
2499
2500 DFG_TYPE_CHECK(
2501 JSValueRegs(), child3, SpecFullRealNumber,
2502 m_jit.branchIfNaN(valueReg));
2503
2504 if (!m_compileOkay)
2505 return;
2506
2507 StorageOperand storage(this, child4);
2508 GPRReg storageReg = storage.gpr();
2509
2510 if (node->op() == PutByValAlias) {
2511 // Store the value to the array.
2512 GPRReg propertyReg = property.gpr();
2513 FPRReg valueReg = value.fpr();
2514 m_jit.storeDouble(valueReg, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight));
2515
2516 noResult(m_currentNode);
2517 return;
2518 }
2519
2520 GPRTemporary temporary;
2521 GPRReg temporaryReg = temporaryRegisterForPutByVal(temporary, node);
2522
2523 MacroAssembler::Jump slowCase;
2524
2525 if (arrayMode.isInBounds()) {
2526 speculationCheck(
2527 OutOfBounds, JSValueRegs(), nullptr,
2528 m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength())));
2529 } else {
2530 MacroAssembler::Jump inBounds = m_jit.branch32(MacroAssembler::Below, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength()));
2531
2532 slowCase = m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfVectorLength()));
2533
2534 if (!arrayMode.isOutOfBounds())
2535 speculationCheck(OutOfBounds, JSValueRegs(), nullptr, slowCase);
2536
2537 m_jit.add32(TrustedImm32(1), propertyReg, temporaryReg);
2538 m_jit.store32(temporaryReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength()));
2539
2540 inBounds.link(&m_jit);
2541 }
2542
2543 m_jit.storeDouble(valueReg, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight));
2544
2545 base.use();
2546 property.use();
2547 value.use();
2548 storage.use();
2549
2550 if (arrayMode.isOutOfBounds()) {
2551 addSlowPathGenerator(
2552 slowPathCall(
2553 slowCase, this,
2554 node->ecmaMode().isStrict()
2555 ? (node->op() == PutByValDirect ? operationPutDoubleByValDirectBeyondArrayBoundsStrict : operationPutDoubleByValBeyondArrayBoundsStrict)
2556 : (node->op() == PutByValDirect ? operationPutDoubleByValDirectBeyondArrayBoundsNonStrict : operationPutDoubleByValBeyondArrayBoundsNonStrict),
2557 NoResult, TrustedImmPtr::weakPointer(m_graph, m_graph.globalObjectFor(node->origin.semantic)), baseReg, propertyReg, valueReg));
2558 }
2559
2560 noResult(m_currentNode, UseChildrenCalledExplicitly);
2561}
2562
// Emits code for GetCharCodeAt: loads the character code at the given index of
// a (known flat) string, OSR-exiting on an out-of-range index. Handles both
// 8-bit and 16-bit string storage.
void SpeculativeJIT::compileGetCharCodeAt(Node* node)
{
    SpeculateCellOperand string(this, node->child1());
    SpeculateStrictInt32Operand index(this, node->child2());
    StorageOperand storage(this, node->child3());

    GPRReg stringReg = string.gpr();
    GPRReg indexReg = index.gpr();
    GPRReg storageReg = storage.gpr();

    ASSERT(speculationChecked(m_state.forNode(node->child1()).m_type, SpecString));

    GPRTemporary scratch(this);
    GPRReg scratchReg = scratch.gpr();

    m_jit.loadPtr(MacroAssembler::Address(stringReg, JSString::offsetOfValue()), scratchReg);

    // unsigned comparison so we can filter out negative indices and indices that are too large
    speculationCheck(Uncountable, JSValueRegs(), nullptr, m_jit.branch32(MacroAssembler::AboveOrEqual, indexReg, CCallHelpers::Address(scratchReg, StringImpl::lengthMemoryOffset())));

    // Load the character into scratchReg
    JITCompiler::Jump is16Bit = m_jit.branchTest32(MacroAssembler::Zero, MacroAssembler::Address(scratchReg, StringImpl::flagsOffset()), TrustedImm32(StringImpl::flagIs8Bit()));

    m_jit.load8(MacroAssembler::BaseIndex(storageReg, indexReg, MacroAssembler::TimesOne, 0), scratchReg);
    JITCompiler::Jump cont8Bit = m_jit.jump();

    is16Bit.link(&m_jit);

    m_jit.load16(MacroAssembler::BaseIndex(storageReg, indexReg, MacroAssembler::TimesTwo, 0), scratchReg);

    cont8Bit.link(&m_jit);

    strictInt32Result(scratchReg, m_currentNode);
}
2597
// Emits code for GetByVal on a JSString: reads one character and materializes
// it as a single-character JSString from the VM's small-strings cache.
// In-bounds array modes OSR-exit on an out-of-bounds index; out-of-bounds
// modes handle the miss on a slow path (possibly via the "sane chain"
// optimization, which watches String.prototype/Object.prototype structures so
// an out-of-bounds read can simply produce undefined).
void SpeculativeJIT::compileGetByValOnString(Node* node)
{
    SpeculateCellOperand base(this, m_graph.child(node, 0));
    SpeculateStrictInt32Operand property(this, m_graph.child(node, 1));
    StorageOperand storage(this, m_graph.child(node, 2));
    GPRReg baseReg = base.gpr();
    GPRReg propertyReg = property.gpr();
    GPRReg storageReg = storage.gpr();

    GPRTemporary scratch(this);
    GPRReg scratchReg = scratch.gpr();
#if USE(JSVALUE32_64)
    // On 32-bit, an out-of-bounds result may be a non-cell JSValue, so a tag
    // register is only allocated when the array mode can go out of bounds.
    GPRTemporary resultTag;
    GPRReg resultTagReg = InvalidGPRReg;
    if (node->arrayMode().isOutOfBounds()) {
        GPRTemporary realResultTag(this);
        resultTag.adopt(realResultTag);
        resultTagReg = resultTag.gpr();
    }
#endif

    // unsigned comparison so we can filter out negative indices and indices that are too large
    m_jit.loadPtr(MacroAssembler::Address(baseReg, JSString::offsetOfValue()), scratchReg);
    JITCompiler::Jump outOfBounds = m_jit.branch32(
        MacroAssembler::AboveOrEqual, propertyReg,
        MacroAssembler::Address(scratchReg, StringImpl::lengthMemoryOffset()));
    if (node->arrayMode().isInBounds())
        speculationCheck(OutOfBounds, JSValueRegs(), nullptr, outOfBounds);

    // Load the character into scratchReg
    JITCompiler::Jump is16Bit = m_jit.branchTest32(MacroAssembler::Zero, MacroAssembler::Address(scratchReg, StringImpl::flagsOffset()), TrustedImm32(StringImpl::flagIs8Bit()));

    m_jit.load8(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesOne, 0), scratchReg);
    JITCompiler::Jump cont8Bit = m_jit.jump();

    is16Bit.link(&m_jit);

    m_jit.load16(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesTwo, 0), scratchReg);

    // Characters above maxSingleCharacterString are not in the cache and need
    // a slow-path call to build the string.
    JITCompiler::Jump bigCharacter =
        m_jit.branch32(MacroAssembler::Above, scratchReg, TrustedImm32(maxSingleCharacterString));

    // 8 bit string values don't need the isASCII check.
    cont8Bit.link(&m_jit);

    // Index into the single-character string cache: shift by pointer size
    // (2 on 32-bit, 3 on 64-bit), then load the cached JSString*.
    VM& vm = this->vm();
    m_jit.lshift32(MacroAssembler::TrustedImm32(sizeof(void*) == 4 ? 2 : 3), scratchReg);
    m_jit.addPtr(TrustedImmPtr(vm.smallStrings.singleCharacterStrings()), scratchReg);
    m_jit.loadPtr(scratchReg, scratchReg);

    addSlowPathGenerator(
        slowPathCall(
            bigCharacter, this, operationSingleCharacterString, scratchReg, &vm, scratchReg));

    if (node->arrayMode().isOutOfBounds()) {
#if USE(JSVALUE32_64)
        m_jit.move(TrustedImm32(JSValue::CellTag), resultTagReg);
#endif

        JSGlobalObject* globalObject = m_jit.globalObjectFor(node->origin.semantic);
        Structure* stringPrototypeStructure = globalObject->stringPrototype()->structure(vm);
        Structure* objectPrototypeStructure = globalObject->objectPrototype()->structure(vm);
        // Pairs with the store that published the structures we just loaded.
        WTF::dependentLoadLoadFence();

        if (globalObject->stringPrototypeChainIsSane()) {
            // FIXME: This could be captured using a Speculation mode that means "out-of-bounds
            // loads return a trivial value". Something like OutOfBoundsSaneChain. This should
            // speculate that we don't take negative out-of-bounds, or better yet, it should rely
            // on a stringPrototypeChainIsSane() guaranteeing that the prototypes have no negative
            // indexed properties either.
            // https://bugs.webkit.org/show_bug.cgi?id=144668
            m_jit.graph().registerAndWatchStructureTransition(stringPrototypeStructure);
            m_jit.graph().registerAndWatchStructureTransition(objectPrototypeStructure);

#if USE(JSVALUE64)
            addSlowPathGenerator(makeUnique<SaneStringGetByValSlowPathGenerator>(
                outOfBounds, this, JSValueRegs(scratchReg), TrustedImmPtr::weakPointer(m_graph, globalObject), baseReg, propertyReg));
#else
            addSlowPathGenerator(makeUnique<SaneStringGetByValSlowPathGenerator>(
                outOfBounds, this, JSValueRegs(resultTagReg, scratchReg), TrustedImmPtr::weakPointer(m_graph, globalObject), baseReg, propertyReg));
#endif
        } else {
            // Prototype chain not sane: a generic get-by-val call handles the
            // out-of-bounds index.
#if USE(JSVALUE64)
            addSlowPathGenerator(
                slowPathCall(
                    outOfBounds, this, operationGetByValStringInt,
                    scratchReg, TrustedImmPtr::weakPointer(m_graph, globalObject), baseReg, propertyReg));
#else
            addSlowPathGenerator(
                slowPathCall(
                    outOfBounds, this, operationGetByValStringInt,
                    JSValueRegs(resultTagReg, scratchReg), TrustedImmPtr::weakPointer(m_graph, globalObject), baseReg, propertyReg));
#endif
        }

#if USE(JSVALUE64)
        jsValueResult(scratchReg, m_currentNode);
#else
        jsValueResult(resultTagReg, scratchReg, m_currentNode);
#endif
    } else
        cellResult(scratchReg, m_currentNode);
}
2701
// Emits code for String.fromCharCode with a single argument. The untyped path
// makes an out-of-line call; the int32 path grabs the cached single-character
// string and falls back to a slow-path call for large char codes or a missing
// cache entry.
void SpeculativeJIT::compileFromCharCode(Node* node)
{
    Edge& child = node->child1();
    if (child.useKind() == UntypedUse) {
        // Argument could be anything: do the whole conversion in the runtime.
        JSValueOperand opr(this, child);
        JSValueRegs oprRegs = opr.jsValueRegs();

        flushRegisters();
        JSValueRegsFlushedCallResult result(this);
        JSValueRegs resultRegs = result.regs();
        callOperation(operationStringFromCharCodeUntyped, resultRegs, TrustedImmPtr::weakPointer(m_graph, m_graph.globalObjectFor(node->origin.semantic)), oprRegs);
        m_jit.exceptionCheck();

        jsValueResult(resultRegs, node);
        return;
    }

    SpeculateStrictInt32Operand property(this, child);
    GPRReg propertyReg = property.gpr();
    GPRTemporary smallStrings(this);
    GPRTemporary scratch(this);
    GPRReg scratchReg = scratch.gpr();
    GPRReg smallStringsReg = smallStrings.gpr();

    JITCompiler::JumpList slowCases;
    // Slow path if the char code is not covered by the small-strings cache.
    slowCases.append(m_jit.branch32(MacroAssembler::Above, propertyReg, TrustedImm32(maxSingleCharacterString)));
    m_jit.move(TrustedImmPtr(vm().smallStrings.singleCharacterStrings()), smallStringsReg);
    m_jit.loadPtr(MacroAssembler::BaseIndex(smallStringsReg, propertyReg, MacroAssembler::ScalePtr, 0), scratchReg);

    // Slow path if the cache slot is empty (null entry).
    slowCases.append(m_jit.branchTest32(MacroAssembler::Zero, scratchReg));
    addSlowPathGenerator(slowPathCall(slowCases, this, operationStringFromCharCode, scratchReg, TrustedImmPtr::weakPointer(m_graph, m_graph.globalObjectFor(node->origin.semantic)), propertyReg));
    cellResult(scratchReg, m_currentNode);
}
2735
2736GeneratedOperandType SpeculativeJIT::checkGeneratedTypeForToInt32(Node* node)
2737{
2738 VirtualRegister virtualRegister = node->virtualRegister();
2739 GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
2740
2741 switch (info.registerFormat()) {
2742 case DataFormatStorage:
2743 RELEASE_ASSERT_NOT_REACHED();
2744
2745 case DataFormatBoolean:
2746 case DataFormatCell:
2747 terminateSpeculativeExecution(Uncountable, JSValueRegs(), nullptr);
2748 return GeneratedOperandTypeUnknown;
2749
2750 case DataFormatNone:
2751 case DataFormatJSCell:
2752 case DataFormatJS:
2753 case DataFormatJSBoolean:
2754 case DataFormatJSDouble:
2755 case DataFormatJSBigInt32:
2756 return GeneratedOperandJSValue;
2757
2758 case DataFormatJSInt32:
2759 case DataFormatInt32:
2760 return GeneratedOperandInteger;
2761
2762 default:
2763 RELEASE_ASSERT_NOT_REACHED();
2764 return GeneratedOperandTypeUnknown;
2765 }
2766}
2767
// Emits code for ValueToInt32: converts the child (already speculated per its
// use kind) to an int32 with ECMAScript ToInt32 semantics (truncate toward
// zero, wrap modulo 2^32).
void SpeculativeJIT::compileValueToInt32(Node* node)
{
    switch (node->child1().useKind()) {
#if USE(JSVALUE64)
    case Int52RepUse: {
        // Int52 -> int32: ToInt32 keeps just the low 32 bits.
        SpeculateStrictInt52Operand op1(this, node->child1());
        GPRTemporary result(this, Reuse, op1);
        GPRReg op1GPR = op1.gpr();
        GPRReg resultGPR = result.gpr();
        m_jit.zeroExtend32ToWord(op1GPR, resultGPR);
        strictInt32Result(resultGPR, node, DataFormatInt32);
        return;
    }
#endif // USE(JSVALUE64)

    case DoubleRepUse: {
        GPRTemporary result(this);
        SpeculateDoubleOperand op1(this, node->child1());
        FPRReg fpr = op1.fpr();
        GPRReg gpr = result.gpr();
#if CPU(ARM64)
        // ARM64 FJCVTZS performs the full JS conversion in one instruction.
        if (MacroAssemblerARM64::supportsDoubleToInt32ConversionUsingJavaScriptSemantics())
            m_jit.convertDoubleToInt32UsingJavaScriptSemantics(fpr, gpr);
        else
#endif
        {
            // Try the fast truncation; fall back to the runtime for values
            // that don't fit (NaN, infinities, out-of-range).
            JITCompiler::Jump notTruncatedToInteger = m_jit.branchTruncateDoubleToInt32(fpr, gpr, JITCompiler::BranchIfTruncateFailed);
            addSlowPathGenerator(slowPathCall(notTruncatedToInteger, this,
                hasSensibleDoubleToInt() ? operationToInt32SensibleSlow : operationToInt32, NeedToSpill, ExceptionCheckRequirement::CheckNotNeeded, gpr, fpr));
        }
        strictInt32Result(gpr, node);
        return;
    }

    case NumberUse:
    case NotCellNorBigIntUse: {
        // Pick a lowering based on how the operand is currently materialized.
        switch (checkGeneratedTypeForToInt32(node->child1().node())) {
        case GeneratedOperandInteger: {
            SpeculateInt32Operand op1(this, node->child1(), ManualOperandSpeculation);
            GPRTemporary result(this, Reuse, op1);
            m_jit.move(op1.gpr(), result.gpr());
            strictInt32Result(result.gpr(), node, op1.format());
            return;
        }
        case GeneratedOperandJSValue: {
            GPRTemporary result(this);
#if USE(JSVALUE64)
            JSValueOperand op1(this, node->child1(), ManualOperandSpeculation);

            GPRReg gpr = op1.gpr();
            GPRReg resultGpr = result.gpr();
            FPRTemporary tempFpr(this);
            FPRReg fpr = tempFpr.fpr();

            JITCompiler::Jump isInteger = m_jit.branchIfInt32(gpr);
            JITCompiler::JumpList converted;

            if (node->child1().useKind() == NumberUse) {
                DFG_TYPE_CHECK(
                    JSValueRegs(gpr), node->child1(), SpecBytecodeNumber,
                    m_jit.branchIfNotNumber(gpr));
            } else {
                // NotCellNorBigInt: filter out cells (and BigInt32 when
                // enabled), then convert the remaining primitives.
                JITCompiler::Jump isNumber = m_jit.branchIfNumber(gpr);

                DFG_TYPE_CHECK(
                    JSValueRegs(gpr), node->child1(), ~SpecCellCheck, m_jit.branchIfCell(JSValueRegs(gpr)));
#if USE(BIGINT32)
                DFG_TYPE_CHECK(
                    JSValueRegs(gpr), node->child1(), ~SpecCellCheck & ~SpecBigInt, m_jit.branchIfBigInt32(JSValueRegs(gpr), resultGpr));
#endif

                // It's not a cell: so true turns into 1 and all else turns into 0.
                m_jit.compare64(JITCompiler::Equal, gpr, TrustedImm32(JSValue::ValueTrue), resultGpr);
                converted.append(m_jit.jump());

                isNumber.link(&m_jit);
            }

            // First, if we get here we have a double encoded as a JSValue
            unboxDouble(gpr, resultGpr, fpr);
#if CPU(ARM64)
            if (MacroAssemblerARM64::supportsDoubleToInt32ConversionUsingJavaScriptSemantics())
                m_jit.convertDoubleToInt32UsingJavaScriptSemantics(fpr, resultGpr);
            else
#endif
            {
                silentSpillAllRegisters(resultGpr);
                callOperation(operationToInt32, resultGpr, fpr);
                silentFillAllRegisters();
            }

            converted.append(m_jit.jump());

            isInteger.link(&m_jit);
            m_jit.zeroExtend32ToWord(gpr, resultGpr);

            converted.link(&m_jit);
#else
            // 32-bit JSValue path: tag and payload live in separate GPRs.
            Node* childNode = node->child1().node();
            VirtualRegister virtualRegister = childNode->virtualRegister();
            GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);

            JSValueOperand op1(this, node->child1(), ManualOperandSpeculation);

            GPRReg payloadGPR = op1.payloadGPR();
            GPRReg resultGpr = result.gpr();

            JITCompiler::JumpList converted;

            if (info.registerFormat() == DataFormatJSInt32)
                m_jit.move(payloadGPR, resultGpr);
            else {
                GPRReg tagGPR = op1.tagGPR();
                FPRTemporary tempFpr(this);
                FPRReg fpr = tempFpr.fpr();
                FPRTemporary scratch(this);

                JITCompiler::Jump isInteger = m_jit.branchIfInt32(tagGPR);

                if (node->child1().useKind() == NumberUse) {
                    DFG_TYPE_CHECK(
                        op1.jsValueRegs(), node->child1(), SpecBytecodeNumber,
                        m_jit.branch32(
                            MacroAssembler::AboveOrEqual, tagGPR,
                            TrustedImm32(JSValue::LowestTag)));
                } else {
                    JITCompiler::Jump isNumber = m_jit.branch32(MacroAssembler::Below, tagGPR, TrustedImm32(JSValue::LowestTag));

                    DFG_TYPE_CHECK(
                        op1.jsValueRegs(), node->child1(), ~SpecCell,
                        m_jit.branchIfCell(op1.jsValueRegs()));

                    // It's not a cell: so true turns into 1 and all else turns into 0.
                    JITCompiler::Jump isBoolean = m_jit.branchIfBoolean(tagGPR, InvalidGPRReg);
                    m_jit.move(TrustedImm32(0), resultGpr);
                    converted.append(m_jit.jump());

                    isBoolean.link(&m_jit);
                    m_jit.move(payloadGPR, resultGpr);
                    converted.append(m_jit.jump());

                    isNumber.link(&m_jit);
                }

                unboxDouble(tagGPR, payloadGPR, fpr, scratch.fpr());

                silentSpillAllRegisters(resultGpr);
                callOperation(operationToInt32, resultGpr, fpr);
                silentFillAllRegisters();

                converted.append(m_jit.jump());

                isInteger.link(&m_jit);
                m_jit.move(payloadGPR, resultGpr);

                converted.link(&m_jit);
            }
#endif
            strictInt32Result(resultGpr, node);
            return;
        }
        case GeneratedOperandTypeUnknown:
            // checkGeneratedTypeForToInt32 already terminated compilation.
            RELEASE_ASSERT(!m_compileOkay);
            return;
        }
        RELEASE_ASSERT_NOT_REACHED();
        return;
    }

    default:
        ASSERT(!m_compileOkay);
        return;
    }
}
2942
// Emits code for UInt32ToNumber: reinterprets an int32 as unsigned. In the
// overflow-tolerant mode the result widens to Int52 or double; otherwise a
// negative input (i.e. an unsigned value >= 2^31) triggers an OSR exit.
void SpeculativeJIT::compileUInt32ToNumber(Node* node)
{
    if (doesOverflow(node->arithMode())) {
        if (enableInt52()) {
            // Zero-extension makes the unsigned value exactly representable
            // as a (non-negative) Int52.
            SpeculateInt32Operand op1(this, node->child1());
            GPRTemporary result(this, Reuse, op1);
            m_jit.zeroExtend32ToWord(op1.gpr(), result.gpr());
            strictInt52Result(result.gpr(), node);
            return;
        }
        SpeculateInt32Operand op1(this, node->child1());
        FPRTemporary result(this);

        GPRReg inputGPR = op1.gpr();
        FPRReg outputFPR = result.fpr();

        // Signed int32 -> double, then add 2^32 to fix up values that were
        // actually large unsigned numbers (negative when viewed as signed).
        m_jit.convertInt32ToDouble(inputGPR, outputFPR);

        JITCompiler::Jump positive = m_jit.branch32(MacroAssembler::GreaterThanOrEqual, inputGPR, TrustedImm32(0));
        m_jit.addDouble(JITCompiler::AbsoluteAddress(&AssemblyHelpers::twoToThe32), outputFPR);
        positive.link(&m_jit);

        doubleResult(outputFPR, node);
        return;
    }

    RELEASE_ASSERT(node->arithMode() == Arith::CheckOverflow);

    SpeculateInt32Operand op1(this, node->child1());
    GPRTemporary result(this);

    m_jit.move(op1.gpr(), result.gpr());

    // A negative signed value means the unsigned value doesn't fit in int32.
    speculationCheck(Overflow, JSValueRegs(), nullptr, m_jit.branch32(MacroAssembler::LessThan, result.gpr(), TrustedImm32(0)));

    strictInt32Result(result.gpr(), node, op1.format());
}
2980
// Emits code for DoubleAsInt32: converts a double that is speculated to be
// exactly representable as an int32. Any inexact conversion (and, if the
// arith mode requires it, negative zero) triggers an OSR exit.
void SpeculativeJIT::compileDoubleAsInt32(Node* node)
{
    SpeculateDoubleOperand op1(this, node->child1());
    FPRTemporary scratch(this);
    GPRTemporary result(this);

    FPRReg valueFPR = op1.fpr();
    FPRReg scratchFPR = scratch.fpr();
    GPRReg resultGPR = result.gpr();

    JITCompiler::JumpList failureCases;
    RELEASE_ASSERT(shouldCheckOverflow(node->arithMode()));
    m_jit.branchConvertDoubleToInt32(
        valueFPR, resultGPR, failureCases, scratchFPR,
        shouldCheckNegativeZero(node->arithMode()));
    speculationCheck(Overflow, JSValueRegs(), nullptr, failureCases);

    strictInt32Result(resultGPR, node);
}
3000
// Emits code for DoubleRep: produces the double representation of the child.
// RealNumberUse unboxes a proven real number; Number/NotCellNorBigInt handles
// the full primitive conversion (undefined -> NaN, null/false -> 0, true -> 1);
// Int52RepUse is a straight int64 -> double conversion.
void SpeculativeJIT::compileDoubleRep(Node* node)
{
    switch (node->child1().useKind()) {
    case RealNumberUse: {
        JSValueOperand op1(this, node->child1(), ManualOperandSpeculation);
        FPRTemporary result(this);

        JSValueRegs op1Regs = op1.jsValueRegs();
        FPRReg resultFPR = result.fpr();

#if USE(JSVALUE64)
        GPRTemporary temp(this);
        GPRReg tempGPR = temp.gpr();
        m_jit.unboxDoubleWithoutAssertions(op1Regs.gpr(), tempGPR, resultFPR);
#else
        FPRTemporary temp(this);
        FPRReg tempFPR = temp.fpr();
        unboxDouble(op1Regs.tagGPR(), op1Regs.payloadGPR(), resultFPR, tempFPR);
#endif

        // A NaN result means the value was not a boxed double; it must then
        // be an int32, or the speculation fails.
        JITCompiler::Jump done = m_jit.branchIfNotNaN(resultFPR);

        DFG_TYPE_CHECK(
            op1Regs, node->child1(), SpecBytecodeRealNumber, m_jit.branchIfNotInt32(op1Regs));
        m_jit.convertInt32ToDouble(op1Regs.payloadGPR(), resultFPR);

        done.link(&m_jit);

        doubleResult(resultFPR, node);
        return;
    }

    case NotCellNorBigIntUse:
    case NumberUse: {
        SpeculatedType possibleTypes = m_state.forNode(node->child1()).m_type;
        if (isInt32Speculation(possibleTypes)) {
            // Abstract interpretation proved int32: plain conversion suffices.
            SpeculateInt32Operand op1(this, node->child1(), ManualOperandSpeculation);
            FPRTemporary result(this);
            m_jit.convertInt32ToDouble(op1.gpr(), result.fpr());
            doubleResult(result.fpr(), node);
            return;
        }

        JSValueOperand op1(this, node->child1(), ManualOperandSpeculation);
        FPRTemporary result(this);

#if USE(JSVALUE64)
        GPRTemporary temp(this);

        GPRReg op1GPR = op1.gpr();
        GPRReg tempGPR = temp.gpr();
        FPRReg resultFPR = result.fpr();
        JITCompiler::JumpList done;

        JITCompiler::Jump isInteger = m_jit.branchIfInt32(op1GPR);

        if (node->child1().useKind() == NotCellNorBigIntUse) {
            // Handle the non-number primitives inline:
            // null -> 0, false -> 0, true -> 1, undefined -> NaN.
            JITCompiler::Jump isNumber = m_jit.branchIfNumber(op1GPR);
            JITCompiler::Jump isUndefined = m_jit.branchIfUndefined(op1GPR);

            static constexpr double zero = 0;
            m_jit.loadDouble(TrustedImmPtr(&zero), resultFPR);

            JITCompiler::Jump isNull = m_jit.branchIfNull(op1GPR);
            done.append(isNull);

            DFG_TYPE_CHECK(JSValueRegs(op1GPR), node->child1(), ~SpecCellCheck & ~SpecBigInt,
                m_jit.branchTest64(JITCompiler::Zero, op1GPR, TrustedImm32(JSValue::BoolTag)));

            JITCompiler::Jump isFalse = m_jit.branch64(JITCompiler::Equal, op1GPR, TrustedImm64(JSValue::ValueFalse));
            static constexpr double one = 1;
            m_jit.loadDouble(TrustedImmPtr(&one), resultFPR);
            done.append(m_jit.jump());
            done.append(isFalse);

            isUndefined.link(&m_jit);
            static const double NaN = PNaN;
            m_jit.loadDouble(TrustedImmPtr(&NaN), resultFPR);
            done.append(m_jit.jump());

            isNumber.link(&m_jit);
        } else if (needsTypeCheck(node->child1(), SpecBytecodeNumber)) {
            typeCheck(
                JSValueRegs(op1GPR), node->child1(), SpecBytecodeNumber,
                m_jit.branchIfNotNumber(op1GPR));
        }

        unboxDouble(op1GPR, tempGPR, resultFPR);
        done.append(m_jit.jump());

        isInteger.link(&m_jit);
        m_jit.convertInt32ToDouble(op1GPR, resultFPR);
        done.link(&m_jit);
#else // USE(JSVALUE64) -> this is the 32_64 case
        FPRTemporary temp(this);

        GPRReg op1TagGPR = op1.tagGPR();
        GPRReg op1PayloadGPR = op1.payloadGPR();
        FPRReg tempFPR = temp.fpr();
        FPRReg resultFPR = result.fpr();
        JITCompiler::JumpList done;

        JITCompiler::Jump isInteger = m_jit.branchIfInt32(op1TagGPR);

        if (node->child1().useKind() == NotCellNorBigIntUse) {
            JITCompiler::Jump isNumber = m_jit.branch32(JITCompiler::Below, op1TagGPR, JITCompiler::TrustedImm32(JSValue::LowestTag + 1));
            JITCompiler::Jump isUndefined = m_jit.branchIfUndefined(op1TagGPR);

            static constexpr double zero = 0;
            m_jit.loadDouble(TrustedImmPtr(&zero), resultFPR);

            JITCompiler::Jump isNull = m_jit.branchIfNull(op1TagGPR);
            done.append(isNull);

            DFG_TYPE_CHECK(JSValueRegs(op1TagGPR, op1PayloadGPR), node->child1(), ~SpecCell, m_jit.branchIfNotBoolean(op1TagGPR, InvalidGPRReg));

            JITCompiler::Jump isFalse = m_jit.branchTest32(JITCompiler::Zero, op1PayloadGPR, TrustedImm32(1));
            static constexpr double one = 1;
            m_jit.loadDouble(TrustedImmPtr(&one), resultFPR);
            done.append(m_jit.jump());
            done.append(isFalse);

            isUndefined.link(&m_jit);
            static const double NaN = PNaN;
            m_jit.loadDouble(TrustedImmPtr(&NaN), resultFPR);
            done.append(m_jit.jump());

            isNumber.link(&m_jit);
        } else if (needsTypeCheck(node->child1(), SpecBytecodeNumber)) {
            // This check fails with Int32Tag, but it is OK since Int32 case is already excluded.
            typeCheck(
                JSValueRegs(op1TagGPR, op1PayloadGPR), node->child1(), SpecBytecodeNumber,
                m_jit.branch32(MacroAssembler::AboveOrEqual, op1TagGPR, TrustedImm32(JSValue::LowestTag)));
        }

        unboxDouble(op1TagGPR, op1PayloadGPR, resultFPR, tempFPR);
        done.append(m_jit.jump());

        isInteger.link(&m_jit);
        m_jit.convertInt32ToDouble(op1PayloadGPR, resultFPR);
        done.link(&m_jit);
#endif // USE(JSVALUE64)

        doubleResult(resultFPR, node);
        return;
    }

#if USE(JSVALUE64)
    case Int52RepUse: {
        SpeculateStrictInt52Operand value(this, node->child1());
        FPRTemporary result(this);

        GPRReg valueGPR = value.gpr();
        FPRReg resultFPR = result.fpr();

        m_jit.convertInt64ToDouble(valueGPR, resultFPR);

        doubleResult(resultFPR, node);
        return;
    }
#endif // USE(JSVALUE64)

    default:
        RELEASE_ASSERT_NOT_REACHED();
        return;
    }
}
3168
// Emits code for ValueRep: boxes a DoubleRep or Int52Rep value back into a
// generic JSValue representation.
void SpeculativeJIT::compileValueRep(Node* node)
{
    switch (node->child1().useKind()) {
    case DoubleRepUse: {
        SpeculateDoubleOperand value(this, node->child1());
        JSValueRegsTemporary result(this);

        FPRReg valueFPR = value.fpr();
        JSValueRegs resultRegs = result.regs();

        // It's very tempting to in-place filter the value to indicate that it's not impure NaN
        // anymore. Unfortunately, this would be unsound. If it's a GetLocal or if the value was
        // subject to a prior SetLocal, filtering the value would imply that the corresponding
        // local was purified.
        if (needsTypeCheck(node->child1(), ~SpecDoubleImpureNaN))
            m_jit.purifyNaN(valueFPR);

        boxDouble(valueFPR, resultRegs);

        jsValueResult(resultRegs, node);
        return;
    }

#if USE(JSVALUE64)
    case Int52RepUse: {
        SpeculateStrictInt52Operand value(this, node->child1());
        GPRTemporary result(this);

        GPRReg valueGPR = value.gpr();
        GPRReg resultGPR = result.gpr();

        // Boxing either produces an int32 JSValue or a boxed double,
        // depending on whether the value fits.
        boxInt52(valueGPR, resultGPR, DataFormatStrictInt52);

        jsValueResult(resultGPR, node);
        return;
    }
#endif // USE(JSVALUE64)

    default:
        RELEASE_ASSERT_NOT_REACHED();
        return;
    }
}
3212
// Maps an arbitrary double onto the byte range used for clamped typed-array
// stores. The +0.5 bias makes the caller's subsequent truncation behave as
// rounding; the result stays a double in [0, 255]. NaN deliberately falls
// into the zero case via the inverted comparison below.
static double clampDoubleToByte(double d)
{
    const double biased = d + 0.5;
    if (!(biased > 0)) // Also catches NaN.
        return 0;
    if (biased > 255)
        return 255;
    return biased;
}
3222
// Emits code that clamps the int32 in `result` to [0, 255] in place:
// negatives become 0, values above 255 become 255.
static void compileClampIntegerToByte(JITCompiler& jit, GPRReg result)
{
    // Unsigned compare: values already in [0, 255] skip both fixup paths.
    MacroAssembler::Jump inBounds = jit.branch32(MacroAssembler::BelowOrEqual, result, JITCompiler::TrustedImm32(0xff));
    MacroAssembler::Jump tooBig = jit.branch32(MacroAssembler::GreaterThan, result, JITCompiler::TrustedImm32(0xff));
    // Not in bounds and not (signed) too big: the value is negative; zero it.
    jit.xorPtr(result, result);
    MacroAssembler::Jump clamped = jit.jump();
    tooBig.link(&jit);
    jit.move(JITCompiler::TrustedImm32(255), result);
    clamped.link(&jit);
    inBounds.link(&jit);
}
3234
// Emits code that converts the double in `source` to a clamped byte in
// `result`: NaN and non-positive values become 0, values above 255 become
// 255, and everything else is biased by 0.5 and truncated (so the truncation
// acts as rounding). Mirrors the static clampDoubleToByte() helper.
static void compileClampDoubleToByte(JITCompiler& jit, GPRReg result, FPRReg source, FPRReg scratch)
{
    // Unordered compare so we pick up NaN
    static constexpr double zero = 0;
    static constexpr double byteMax = 255;
    static constexpr double half = 0.5;
    jit.loadDouble(JITCompiler::TrustedImmPtr(&zero), scratch);
    MacroAssembler::Jump tooSmall = jit.branchDouble(MacroAssembler::DoubleLessThanOrEqualOrUnordered, source, scratch);
    jit.loadDouble(JITCompiler::TrustedImmPtr(&byteMax), scratch);
    MacroAssembler::Jump tooBig = jit.branchDouble(MacroAssembler::DoubleGreaterThanAndOrdered, source, scratch);

    jit.loadDouble(JITCompiler::TrustedImmPtr(&half), scratch);
    // FIXME: This should probably just use a floating point round!
    // https://bugs.webkit.org/show_bug.cgi?id=72054
    jit.addDouble(source, scratch);
    jit.truncateDoubleToInt32(scratch, result);
    MacroAssembler::Jump truncatedInt = jit.jump();

    tooSmall.link(&jit);
    jit.xorPtr(result, result);
    MacroAssembler::Jump zeroed = jit.jump();

    tooBig.link(&jit);
    jit.move(JITCompiler::TrustedImm32(255), result);

    truncatedInt.link(&jit);
    zeroed.link(&jit);

}
3264
// Returns a jump taken when `indexGPR` is out of bounds for the typed array
// in `baseGPR`, or an unset Jump when no check is needed (PutByValAlias, or a
// constant index statically proven in-bounds against a foldable view's
// length). Uses an unsigned compare so negative indices also fail.
JITCompiler::Jump SpeculativeJIT::jumpForTypedArrayOutOfBounds(Node* node, GPRReg baseGPR, GPRReg indexGPR)
{
    if (node->op() == PutByValAlias)
        return JITCompiler::Jump();
    JSArrayBufferView* view = m_jit.graph().tryGetFoldableView(
        m_state.forNode(m_jit.graph().child(node, 0)).m_value, node->arrayMode());
    if (view) {
        // Known view: compare against its constant length.
        uint32_t length = view->length();
        Node* indexNode = m_jit.graph().child(node, 1).node();
        if (indexNode->isInt32Constant() && indexNode->asUInt32() < length)
            return JITCompiler::Jump();
        return m_jit.branch32(
            MacroAssembler::AboveOrEqual, indexGPR, MacroAssembler::Imm32(length));
    }
    // Unknown view: compare against the length loaded from the object.
    return m_jit.branch32(
        MacroAssembler::AboveOrEqual, indexGPR,
        MacroAssembler::Address(baseGPR, JSArrayBufferView::offsetOfLength()));
}
3283
3284void SpeculativeJIT::emitTypedArrayBoundsCheck(Node* node, GPRReg baseGPR, GPRReg indexGPR)
3285{
3286 JITCompiler::Jump jump = jumpForTypedArrayOutOfBounds(node, baseGPR, indexGPR);
3287 if (!jump.isSet())
3288 return;
3289 speculationCheck(OutOfBounds, JSValueRegs(), nullptr, jump);
3290}
3291
// Given an out-of-bounds jump from a typed-array access, either turns it into
// an OSR exit (in-bounds array modes) or emits code that, for wasteful
// (ArrayBuffer-backed) views, exits when the backing vector is null — i.e.
// the buffer was detached. Returns the jump past the out-of-bounds handling
// for the in-bounds fall-through path (unset if no OOB jump was supplied).
JITCompiler::Jump SpeculativeJIT::jumpForTypedArrayIsDetachedIfOutOfBounds(Node* node, GPRReg base, JITCompiler::Jump outOfBounds)
{
    JITCompiler::Jump done;
    if (outOfBounds.isSet()) {
        done = m_jit.jump();
        if (node->arrayMode().isInBounds())
            speculationCheck(OutOfBounds, JSValueSource(), nullptr, outOfBounds);
        else {
            outOfBounds.link(&m_jit);

            // Only wasteful views can be detached; other modes skip the check.
            JITCompiler::Jump notWasteful = m_jit.branch32(
                MacroAssembler::NotEqual,
                MacroAssembler::Address(base, JSArrayBufferView::offsetOfMode()),
                TrustedImm32(WastefulTypedArray));

            JITCompiler::Jump hasNullVector;
#if CPU(ARM64E)
            {
                // On ARM64E the vector pointer is PAC-signed; strip the tag
                // before testing for null.
                GPRReg scratch = m_jit.scratchRegister();
                DisallowMacroScratchRegisterUsage disallowScratch(m_jit);

                m_jit.loadPtr(MacroAssembler::Address(base, JSArrayBufferView::offsetOfVector()), scratch);
                m_jit.removeArrayPtrTag(scratch);
                hasNullVector = m_jit.branchTestPtr(MacroAssembler::Zero, scratch);
            }
#else // CPU(ARM64E)
            hasNullVector = m_jit.branchTestPtr(
                MacroAssembler::Zero,
                MacroAssembler::Address(base, JSArrayBufferView::offsetOfVector()));
#endif
            speculationCheck(Uncountable, JSValueSource(), node, hasNullVector);
            notWasteful.link(&m_jit);
        }
    }
    return done;
}
3328
3329void SpeculativeJIT::loadFromIntTypedArray(GPRReg storageReg, GPRReg propertyReg, GPRReg resultReg, TypedArrayType type)
3330{
3331 switch (elementSize(type)) {
3332 case 1:
3333 if (isSigned(type))
3334 m_jit.load8SignedExtendTo32(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesOne), resultReg);
3335 else
3336 m_jit.load8(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesOne), resultReg);
3337 break;
3338 case 2:
3339 if (isSigned(type))
3340 m_jit.load16SignedExtendTo32(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesTwo), resultReg);
3341 else
3342 m_jit.load16(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesTwo), resultReg);
3343 break;
3344 case 4:
3345 m_jit.load32(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesFour), resultReg);
3346 break;
3347 default:
3348 CRASH();
3349 }
3350}
3351
// Produces the result of an integer typed-array load. Everything except
// unsigned 32-bit values fits in an int32 directly. For Uint32 the value may
// exceed INT32_MAX, so the result is either speculated non-negative int32,
// widened to Int52 (64-bit only), or converted to a double with a +2^32
// fixup for values that are negative when viewed as signed.
void SpeculativeJIT::setIntTypedArrayLoadResult(Node* node, GPRReg resultReg, TypedArrayType type, bool canSpeculate)
{
    if (elementSize(type) < 4 || isSigned(type)) {
        strictInt32Result(resultReg, node);
        return;
    }

    ASSERT(elementSize(type) == 4 && !isSigned(type));
    if (node->shouldSpeculateInt32() && canSpeculate) {
        // Speculate the unsigned value fits in int32 (i.e. is non-negative
        // when viewed as signed); otherwise OSR exit.
        speculationCheck(Overflow, JSValueRegs(), nullptr, m_jit.branch32(MacroAssembler::LessThan, resultReg, TrustedImm32(0)));
        strictInt32Result(resultReg, node);
        return;
    }

#if USE(JSVALUE64)
    if (node->shouldSpeculateInt52()) {
        ASSERT(enableInt52());
        // Zero-extension makes any uint32 exactly representable as Int52.
        m_jit.zeroExtend32ToWord(resultReg, resultReg);
        strictInt52Result(resultReg, node);
        return;
    }
#endif

    // Fall back to double: convert as signed, then add 2^32 if the value was
    // actually a large unsigned number (negative as signed).
    FPRTemporary fresult(this);
    m_jit.convertInt32ToDouble(resultReg, fresult.fpr());
    JITCompiler::Jump positive = m_jit.branch32(MacroAssembler::GreaterThanOrEqual, resultReg, TrustedImm32(0));
    m_jit.addDouble(JITCompiler::AbsoluteAddress(&AssemblyHelpers::twoToThe32), fresult.fpr());
    positive.link(&m_jit);
    doubleResult(fresult.fpr(), node);
}
3382
// Emits code for GetByVal on an integer typed array: bounds-check, element
// load, then result materialization (which may speculate on the value for
// unsigned 32-bit element types).
void SpeculativeJIT::compileGetByValOnIntTypedArray(Node* node, TypedArrayType type)
{
    ASSERT(isInt(type));

    SpeculateCellOperand base(this, m_graph.varArgChild(node, 0));
    SpeculateStrictInt32Operand property(this, m_graph.varArgChild(node, 1));
    StorageOperand storage(this, m_graph.varArgChild(node, 2));

    GPRReg baseReg = base.gpr();
    GPRReg propertyReg = property.gpr();
    GPRReg storageReg = storage.gpr();

    GPRTemporary result(this);
    GPRReg resultReg = result.gpr();

    emitTypedArrayBoundsCheck(node, baseReg, propertyReg);
    loadFromIntTypedArray(storageReg, propertyReg, resultReg, type);
    constexpr bool canSpeculate = true;
    setIntTypedArrayLoadResult(node, resultReg, type, canSpeculate);
}
3403
// Materializes the value to store into an integer typed array into `value`,
// clamping to [0, 255] when `isClamped` (Uint8ClampedArray). Handles constant
// values, Int32/Int52/DoubleRep uses; for unclamped doubles that don't
// truncate exactly, boxes the value and appends to `slowPathCases` so the
// caller can store via the runtime. Returns false if compilation terminated
// (constant of an impossible type); true otherwise.
bool SpeculativeJIT::getIntTypedArrayStoreOperand(
    GPRTemporary& value,
    GPRReg property,
#if USE(JSVALUE32_64)
    GPRTemporary& propertyTag,
    GPRTemporary& valueTag,
#endif
    Edge valueUse, JITCompiler::JumpList& slowPathCases, bool isClamped)
{
    // Only fold the constant if its speculated type is within what the use
    // kind expects; otherwise fall through to the operand-based paths.
    bool isAppropriateConstant = false;
    if (valueUse->isConstant()) {
        JSValue jsValue = valueUse->asJSValue();
        SpeculatedType expectedType = typeFilterFor(valueUse.useKind());
        SpeculatedType actualType = speculationFromValue(jsValue);
        isAppropriateConstant = (expectedType | actualType) == expectedType;
    }

    if (isAppropriateConstant) {
        JSValue jsValue = valueUse->asJSValue();
        if (!jsValue.isNumber()) {
            // A non-number constant can never satisfy this store speculation.
            terminateSpeculativeExecution(Uncountable, JSValueRegs(), nullptr);
            return false;
        }
        double d = jsValue.asNumber();
        if (isClamped)
            d = clampDoubleToByte(d);
        GPRTemporary scratch(this);
        GPRReg scratchReg = scratch.gpr();
        m_jit.move(Imm32(toInt32(d)), scratchReg);
        value.adopt(scratch);
    } else {
        switch (valueUse.useKind()) {
        case Int32Use: {
            SpeculateInt32Operand valueOp(this, valueUse);
            GPRTemporary scratch(this);
            GPRReg scratchReg = scratch.gpr();
            m_jit.move(valueOp.gpr(), scratchReg);
            if (isClamped)
                compileClampIntegerToByte(m_jit, scratchReg);
            value.adopt(scratch);
            break;
        }

#if USE(JSVALUE64)
        case Int52RepUse: {
            SpeculateStrictInt52Operand valueOp(this, valueUse);
            GPRTemporary scratch(this);
            GPRReg scratchReg = scratch.gpr();
            m_jit.move(valueOp.gpr(), scratchReg);
            if (isClamped) {
                // 64-bit analogue of compileClampIntegerToByte.
                MacroAssembler::Jump inBounds = m_jit.branch64(
                    MacroAssembler::BelowOrEqual, scratchReg, JITCompiler::TrustedImm64(0xff));
                MacroAssembler::Jump tooBig = m_jit.branch64(
                    MacroAssembler::GreaterThan, scratchReg, JITCompiler::TrustedImm64(0xff));
                m_jit.move(TrustedImm32(0), scratchReg);
                MacroAssembler::Jump clamped = m_jit.jump();
                tooBig.link(&m_jit);
                m_jit.move(JITCompiler::TrustedImm32(255), scratchReg);
                clamped.link(&m_jit);
                inBounds.link(&m_jit);
            }
            value.adopt(scratch);
            break;
        }
#endif // USE(JSVALUE64)

        case DoubleRepUse: {
            RELEASE_ASSERT(!isAtomicsIntrinsic(m_currentNode->op()));
            if (isClamped) {
                SpeculateDoubleOperand valueOp(this, valueUse);
                GPRTemporary result(this);
                FPRTemporary floatScratch(this);
                FPRReg fpr = valueOp.fpr();
                GPRReg gpr = result.gpr();
                compileClampDoubleToByte(m_jit, gpr, fpr, floatScratch.fpr());
                value.adopt(result);
            } else {
#if USE(JSVALUE32_64)
                GPRTemporary realPropertyTag(this);
                propertyTag.adopt(realPropertyTag);
                GPRReg propertyTagGPR = propertyTag.gpr();

                GPRTemporary realValueTag(this);
                valueTag.adopt(realValueTag);
                GPRReg valueTagGPR = valueTag.gpr();
#endif
                SpeculateDoubleOperand valueOp(this, valueUse);
                GPRTemporary result(this);
                FPRReg fpr = valueOp.fpr();
                GPRReg gpr = result.gpr();
                // NaN stores 0 directly.
                MacroAssembler::Jump notNaN = m_jit.branchIfNotNaN(fpr);
                m_jit.xorPtr(gpr, gpr);
                MacroAssembler::JumpList fixed(m_jit.jump());
                notNaN.link(&m_jit);

                fixed.append(m_jit.branchTruncateDoubleToInt32(
                    fpr, gpr, MacroAssembler::BranchIfTruncateSuccessful));

                // Truncation failed: box the index and value for the runtime
                // store the caller emits for slowPathCases.
#if USE(JSVALUE64)
                m_jit.or64(GPRInfo::numberTagRegister, property);
                boxDouble(fpr, gpr);
#else
                UNUSED_PARAM(property);
                m_jit.move(TrustedImm32(JSValue::Int32Tag), propertyTagGPR);
                boxDouble(fpr, valueTagGPR, gpr);
#endif
                slowPathCases.append(m_jit.jump());

                fixed.link(&m_jit);
                value.adopt(result);
            }
            break;
        }

        default:
            RELEASE_ASSERT_NOT_REACHED();
            break;
        }
    }
    return true;
}
3525
3526bool SpeculativeJIT::getIntTypedArrayStoreOperandForAtomics(
3527 GPRTemporary& value,
3528 GPRReg property,
3529#if USE(JSVALUE32_64)
3530 GPRTemporary& propertyTag,
3531 GPRTemporary& valueTag,
3532#endif
3533 Edge valueUse)
3534{
3535 JITCompiler::JumpList slowPathCases;
3536 constexpr bool isClamped = false;
3537 bool result = getIntTypedArrayStoreOperand(
3538 value,
3539 property,
3540#if USE(JSVALUE32_64)
3541 propertyTag,
3542 valueTag,
3543#endif
3544 valueUse,
3545 slowPathCases,
3546 isClamped);
3547 ASSERT(slowPathCases.empty());
3548 return result;
3549}
3550
3551void SpeculativeJIT::compilePutByValForIntTypedArray(GPRReg base, GPRReg property, Node* node, TypedArrayType type)
3552{
3553 ASSERT(isInt(type));
3554
3555 StorageOperand storage(this, m_jit.graph().varArgChild(node, 3));
3556 GPRReg storageReg = storage.gpr();
3557
3558 Edge valueUse = m_jit.graph().varArgChild(node, 2);
3559
3560 GPRTemporary value;
3561#if USE(JSVALUE32_64)
3562 GPRTemporary propertyTag;
3563 GPRTemporary valueTag;
3564#endif
3565
3566 JITCompiler::JumpList slowPathCases;
3567
3568 bool result = getIntTypedArrayStoreOperand(
3569 value, property,
3570#if USE(JSVALUE32_64)
3571 propertyTag, valueTag,
3572#endif
3573 valueUse, slowPathCases, isClamped(type));
3574 if (!result) {
3575 noResult(node);
3576 return;
3577 }
3578
3579 GPRReg valueGPR = value.gpr();
3580#if USE(JSVALUE32_64)
3581 GPRReg propertyTagGPR = propertyTag.gpr();
3582 GPRReg valueTagGPR = valueTag.gpr();
3583#endif
3584
3585 ASSERT_UNUSED(valueGPR, valueGPR != property);
3586 ASSERT(valueGPR != base);
3587 ASSERT(valueGPR != storageReg);
3588 JITCompiler::Jump outOfBounds = jumpForTypedArrayOutOfBounds(node, base, property);
3589
3590 switch (elementSize(type)) {
3591 case 1:
3592 m_jit.store8(value.gpr(), MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesOne));
3593 break;
3594 case 2:
3595 m_jit.store16(value.gpr(), MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesTwo));
3596 break;
3597 case 4:
3598 m_jit.store32(value.gpr(), MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesFour));
3599 break;
3600 default:
3601 CRASH();
3602 }
3603
3604 JITCompiler::Jump done = jumpForTypedArrayIsDetachedIfOutOfBounds(node, base, outOfBounds);
3605 if (done.isSet())
3606 done.link(&m_jit);
3607
3608 if (!slowPathCases.empty()) {
3609#if USE(JSVALUE64)
3610 if (node->op() == PutByValDirect) {
3611 addSlowPathGenerator(slowPathCall(
3612 slowPathCases, this,
3613 node->ecmaMode().isStrict() ? operationPutByValDirectStrict : operationPutByValDirectNonStrict,
3614 NoResult, TrustedImmPtr::weakPointer(m_graph, m_graph.globalObjectFor(node->origin.semantic)), base, property, valueGPR));
3615 } else {
3616 addSlowPathGenerator(slowPathCall(
3617 slowPathCases, this,
3618 node->ecmaMode().isStrict() ? operationPutByValStrict : operationPutByValNonStrict,
3619 NoResult, TrustedImmPtr::weakPointer(m_graph, m_graph.globalObjectFor(node->origin.semantic)), base, property, valueGPR));
3620 }
3621#else // not USE(JSVALUE64)
3622 if (node->op() == PutByValDirect) {
3623 addSlowPathGenerator(slowPathCall(
3624 slowPathCases, this,
3625 node->ecmaMode().isStrict() ? operationPutByValDirectCellStrict : operationPutByValDirectCellNonStrict,
3626 NoResult, TrustedImmPtr::weakPointer(m_graph, m_graph.globalObjectFor(node->origin.semantic)), base, JSValueRegs(propertyTagGPR, property), JSValueRegs(valueTagGPR, valueGPR)));
3627 } else {
3628 addSlowPathGenerator(slowPathCall(
3629 slowPathCases, this,
3630 node->ecmaMode().isStrict() ? operationPutByValCellStrict : operationPutByValCellNonStrict,
3631 NoResult, TrustedImmPtr::weakPointer(m_graph, m_graph.globalObjectFor(node->origin.semantic)), base, JSValueRegs(propertyTagGPR, property), JSValueRegs(valueTagGPR, valueGPR)));
3632 }
3633#endif
3634 }
3635
3636 noResult(node);
3637}
3638
3639void SpeculativeJIT::compileGetByValOnFloatTypedArray(Node* node, TypedArrayType type)
3640{
3641 ASSERT(isFloat(type));
3642
3643 SpeculateCellOperand base(this, m_graph.varArgChild(node, 0));
3644 SpeculateStrictInt32Operand property(this, m_graph.varArgChild(node, 1));
3645 StorageOperand storage(this, m_graph.varArgChild(node, 2));
3646
3647 GPRReg baseReg = base.gpr();
3648 GPRReg propertyReg = property.gpr();
3649 GPRReg storageReg = storage.gpr();
3650
3651 FPRTemporary result(this);
3652 FPRReg resultReg = result.fpr();
3653 emitTypedArrayBoundsCheck(node, baseReg, propertyReg);
3654 switch (elementSize(type)) {
3655 case 4:
3656 m_jit.loadFloat(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesFour), resultReg);
3657 m_jit.convertFloatToDouble(resultReg, resultReg);
3658 break;
3659 case 8: {
3660 m_jit.loadDouble(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight), resultReg);
3661 break;
3662 }
3663 default:
3664 RELEASE_ASSERT_NOT_REACHED();
3665 }
3666
3667 doubleResult(resultReg, node);
3668}
3669
3670void SpeculativeJIT::compilePutByValForFloatTypedArray(GPRReg base, GPRReg property, Node* node, TypedArrayType type)
3671{
3672 ASSERT(isFloat(type));
3673
3674 StorageOperand storage(this, m_jit.graph().varArgChild(node, 3));
3675 GPRReg storageReg = storage.gpr();
3676
3677 Edge valueUse = m_jit.graph().varArgChild(node, 2);
3678
3679 SpeculateDoubleOperand valueOp(this, valueUse);
3680 FPRTemporary scratch(this);
3681 FPRReg valueFPR = valueOp.fpr();
3682 FPRReg scratchFPR = scratch.fpr();
3683
3684 MacroAssembler::Jump outOfBounds = jumpForTypedArrayOutOfBounds(node, base, property);
3685
3686 switch (elementSize(type)) {
3687 case 4: {
3688 m_jit.moveDouble(valueFPR, scratchFPR);
3689 m_jit.convertDoubleToFloat(valueFPR, scratchFPR);
3690 m_jit.storeFloat(scratchFPR, MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesFour));
3691 break;
3692 }
3693 case 8:
3694 m_jit.storeDouble(valueFPR, MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesEight));
3695 break;
3696 default:
3697 RELEASE_ASSERT_NOT_REACHED();
3698 }
3699
3700 JITCompiler::Jump done = jumpForTypedArrayIsDetachedIfOutOfBounds(node, base, outOfBounds);
3701 if (done.isSet())
3702 done.link(&m_jit);
3703 noResult(node);
3704}
3705
3706void SpeculativeJIT::compileGetByValForObjectWithString(Node* node)
3707{
3708 SpeculateCellOperand arg1(this, m_graph.varArgChild(node, 0));
3709 SpeculateCellOperand arg2(this, m_graph.varArgChild(node, 1));
3710
3711 GPRReg arg1GPR = arg1.gpr();
3712 GPRReg arg2GPR = arg2.gpr();
3713
3714 speculateObject(m_graph.varArgChild(node, 0), arg1GPR);
3715 speculateString(m_graph.varArgChild(node, 1), arg2GPR);
3716
3717 flushRegisters();
3718 JSValueRegsFlushedCallResult result(this);
3719 JSValueRegs resultRegs = result.regs();
3720 callOperation(operationGetByValObjectString, resultRegs, TrustedImmPtr::weakPointer(m_graph, m_graph.globalObjectFor(node->origin.semantic)), arg1GPR, arg2GPR);
3721 m_jit.exceptionCheck();
3722
3723 jsValueResult(resultRegs, node);
3724}
3725
3726void SpeculativeJIT::compileGetByValForObjectWithSymbol(Node* node)
3727{
3728 SpeculateCellOperand arg1(this, m_graph.varArgChild(node, 0));
3729 SpeculateCellOperand arg2(this, m_graph.varArgChild(node, 1));
3730
3731 GPRReg arg1GPR = arg1.gpr();
3732 GPRReg arg2GPR = arg2.gpr();
3733
3734 speculateObject(m_graph.varArgChild(node, 0), arg1GPR);
3735 speculateSymbol(m_graph.varArgChild(node, 1), arg2GPR);
3736
3737 flushRegisters();
3738 JSValueRegsFlushedCallResult result(this);
3739 JSValueRegs resultRegs = result.regs();
3740 callOperation(operationGetByValObjectSymbol, resultRegs, TrustedImmPtr::weakPointer(m_graph, m_graph.globalObjectFor(node->origin.semantic)), arg1GPR, arg2GPR);
3741 m_jit.exceptionCheck();
3742
3743 jsValueResult(resultRegs, node);
3744}
3745
3746void SpeculativeJIT::compileGetPrivateName(Node* node)
3747{
3748 if (node->hasCacheableIdentifier())
3749 return compileGetPrivateNameById(node);
3750
3751 switch (m_graph.child(node, 0).useKind()) {
3752 case CellUse: {
3753 SpeculateCellOperand base(this, m_graph.child(node, 0));
3754 SpeculateCellOperand property(this, m_graph.child(node, 1));
3755
3756 compileGetPrivateNameByVal(node, JSValueRegs::payloadOnly(base.gpr()), JSValueRegs::payloadOnly(property.gpr()));
3757 break;
3758 }
3759 case UntypedUse: {
3760 JSValueOperand base(this, m_graph.child(node, 0));
3761 SpeculateCellOperand property(this, m_graph.child(node, 1));
3762
3763 compileGetPrivateNameByVal(node, base.jsValueRegs(), JSValueRegs::payloadOnly(property.gpr()));
3764 break;
3765 }
3766 default:
3767 DFG_CRASH(m_jit.graph(), node, "Bad use kind");
3768 }
3769}
3770
// Emits an inline cache for GetPrivateName with a symbol property. The base may be
// an untyped JSValue; if abstract interpretation has not proven it to be a cell, a
// not-cell check is added to the slow cases. The slow path calls
// operationGetPrivateNameOptimize, either through the data IC's slow-operation slot
// or as a plain call depending on the IC mode.
void SpeculativeJIT::compileGetPrivateNameByVal(Node* node, JSValueRegs base, JSValueRegs property)
{
    DFG_ASSERT(m_jit.graph(), node, node->op() == GetPrivateName);
    DFG_ASSERT(m_jit.graph(), node, m_graph.child(node, 1).useKind() == SymbolUse);

    std::optional<GPRTemporary> stubInfo;
    JSValueRegsTemporary result(this);

    // A stub-info register is only needed when DFG uses data ICs.
    GPRReg stubInfoGPR = InvalidGPRReg;
    if (JITCode::useDataIC(JITType::DFGJIT)) {
        stubInfo.emplace(this);
        stubInfoGPR = stubInfo->gpr();
    }
    JSValueRegs resultRegs = result.regs();

    speculateSymbol(m_graph.child(node, 1));

    CodeOrigin codeOrigin = node->origin.semantic;
    CallSiteIndex callSite = m_jit.recordCallSiteAndGenerateExceptionHandlingOSRExitIfNeeded(codeOrigin, m_stream->size());
    RegisterSet usedRegisters = this->usedRegisters();

    JITCompiler::JumpList slowCases;
    // If the abstract interpreter already proved the base is a cell, skip the
    // dynamic cell check and pass the base as a CellValue to the slow path below.
    const bool baseIsKnownCell = m_state.forNode(m_graph.child(node, 0)).isType(SpecCell);
    if (!baseIsKnownCell)
        slowCases.append(m_jit.branchIfNotCell(base));

    JITGetByValGenerator gen(
        m_jit.codeBlock(), JITType::DFGJIT, codeOrigin, callSite, AccessType::GetPrivateName, usedRegisters,
        base, property, resultRegs, stubInfoGPR);
    gen.stubInfo()->propertyIsSymbol = true;
    gen.generateFastPath(m_jit);
    if (!JITCode::useDataIC(JITType::DFGJIT))
        slowCases.append(gen.slowPathJump());

    // Builds the slow-path call; `base` here is either a JSValueRegs or a CellValue,
    // chosen by the caller of the lambda below.
    auto makeSlowPathICCall = [&](auto base) {
        if (JITCode::useDataIC(JITType::DFGJIT)) {
            return slowPathICCall(
                slowCases, this, gen.stubInfo(), stubInfoGPR, CCallHelpers::Address(stubInfoGPR, StructureStubInfo::offsetOfSlowOperation()), operationGetPrivateNameOptimize,
                result.regs(), TrustedImmPtr::weakPointer(m_graph, m_graph.globalObjectFor(codeOrigin)), stubInfoGPR,
                base, CCallHelpers::CellValue(property.payloadGPR()));
        }
        return slowPathCall(
            slowCases, this, operationGetPrivateNameOptimize,
            result.regs(), TrustedImmPtr::weakPointer(m_graph, m_graph.globalObjectFor(codeOrigin)), gen.stubInfo(),
            base, CCallHelpers::CellValue(property.payloadGPR()));
    };

    std::unique_ptr<SlowPathGenerator> slowPath = baseIsKnownCell
        ? makeSlowPathICCall(CCallHelpers::CellValue(base.payloadGPR()))
        : makeSlowPathICCall(base);

    m_jit.addGetByVal(gen, slowPath.get());
    addSlowPathGenerator(WTFMove(slowPath));

    jsValueResult(result.regs(), node, DataFormatJS);
}
3827
// GetPrivateName with a cacheable identifier: routed through the cachedGetById IC
// with AccessType::GetPrivateName. CellUse bases need no cell check; UntypedUse
// bases branch to the IC's not-cell handler.
void SpeculativeJIT::compileGetPrivateNameById(Node* node)
{
    switch (m_graph.child(node, 0).useKind()) {
    case CellUse: {
        std::optional<GPRTemporary> stubInfo;
        SpeculateCellOperand base(this, m_graph.child(node, 0));
        JSValueRegsTemporary result(this, Reuse, base);

        // A stub-info register is only needed when DFG uses data ICs.
        GPRReg stubInfoGPR = InvalidGPRReg;
        if (JITCode::useDataIC(JITType::DFGJIT)) {
            stubInfo.emplace(this);
            stubInfoGPR = stubInfo->gpr();
        }
        JSValueRegs baseRegs = JSValueRegs::payloadOnly(base.gpr());
        JSValueRegs resultRegs = result.regs();

        // No not-cell jump needed: the base is already proven to be a cell.
        cachedGetById(node->origin.semantic, baseRegs, resultRegs, stubInfoGPR, node->cacheableIdentifier(), JITCompiler::Jump(), NeedToSpill, AccessType::GetPrivateName);

        jsValueResult(resultRegs, node, DataFormatJS);
        break;
    }

    case UntypedUse: {
        std::optional<GPRTemporary> stubInfo;
        JSValueOperand base(this, m_graph.child(node, 0));
        JSValueRegsTemporary result(this, Reuse, base);

        GPRReg stubInfoGPR = InvalidGPRReg;
        if (JITCode::useDataIC(JITType::DFGJIT)) {
            stubInfo.emplace(this);
            stubInfoGPR = stubInfo->gpr();
        }
        JSValueRegs baseRegs = base.jsValueRegs();
        JSValueRegs resultRegs = result.regs();

        // Non-cell bases take the IC's slow path via this jump.
        JITCompiler::Jump notCell = m_jit.branchIfNotCell(baseRegs);

        cachedGetById(node->origin.semantic, baseRegs, resultRegs, stubInfoGPR, node->cacheableIdentifier(), notCell, NeedToSpill, AccessType::GetPrivateName);

        jsValueResult(resultRegs, node, DataFormatJS);
        break;
    }

    default:
        DFG_CRASH(m_jit.graph(), node, "Bad use kind");
        break;
    }
}
3876
3877void SpeculativeJIT::compilePutByValForCellWithString(Node* node, Edge& child1, Edge& child2, Edge& child3)
3878{
3879 SpeculateCellOperand arg1(this, child1);
3880 SpeculateCellOperand arg2(this, child2);
3881 JSValueOperand arg3(this, child3);
3882
3883 GPRReg arg1GPR = arg1.gpr();
3884 GPRReg arg2GPR = arg2.gpr();
3885 JSValueRegs arg3Regs = arg3.jsValueRegs();
3886
3887 speculateString(child2, arg2GPR);
3888
3889 flushRegisters();
3890 callOperation(
3891 node->ecmaMode().isStrict()
3892 ? (node->op() == PutByValDirect ? operationPutByValDirectCellStringStrict : operationPutByValCellStringStrict)
3893 : (node->op() == PutByValDirect ? operationPutByValDirectCellStringNonStrict : operationPutByValCellStringNonStrict),
3894 TrustedImmPtr::weakPointer(m_graph, m_graph.globalObjectFor(node->origin.semantic)), arg1GPR, arg2GPR, arg3Regs);
3895 m_jit.exceptionCheck();
3896
3897 noResult(node);
3898}
3899
3900void SpeculativeJIT::compilePutByValForCellWithSymbol(Node* node, Edge& child1, Edge& child2, Edge& child3)
3901{
3902 SpeculateCellOperand arg1(this, child1);
3903 SpeculateCellOperand arg2(this, child2);
3904 JSValueOperand arg3(this, child3);
3905
3906 GPRReg arg1GPR = arg1.gpr();
3907 GPRReg arg2GPR = arg2.gpr();
3908 JSValueRegs arg3Regs = arg3.jsValueRegs();
3909
3910 speculateSymbol(child2, arg2GPR);
3911
3912 flushRegisters();
3913 callOperation(
3914 node->ecmaMode().isStrict()
3915 ? (node->op() == PutByValDirect ? operationPutByValDirectCellSymbolStrict : operationPutByValCellSymbolStrict)
3916 : (node->op() == PutByValDirect ? operationPutByValDirectCellSymbolNonStrict : operationPutByValCellSymbolNonStrict),
3917 TrustedImmPtr::weakPointer(m_graph, m_graph.globalObjectFor(node->origin.semantic)), arg1GPR, arg2GPR, arg3Regs);
3918 m_jit.exceptionCheck();
3919
3920 noResult(node);
3921}
3922
3923void SpeculativeJIT::compileGetByValWithThis(Node* node)
3924{
3925 JSValueOperand base(this, node->child1());
3926 JSValueRegs baseRegs = base.jsValueRegs();
3927 JSValueOperand thisValue(this, node->child2());
3928 JSValueRegs thisValueRegs = thisValue.jsValueRegs();
3929 JSValueOperand subscript(this, node->child3());
3930 JSValueRegs subscriptRegs = subscript.jsValueRegs();
3931
3932 flushRegisters();
3933 JSValueRegsFlushedCallResult result(this);
3934 JSValueRegs resultRegs = result.regs();
3935 callOperation(operationGetByValWithThis, resultRegs, TrustedImmPtr::weakPointer(m_graph, m_graph.globalObjectFor(node->origin.semantic)), baseRegs, thisValueRegs, subscriptRegs);
3936 m_jit.exceptionCheck();
3937
3938 jsValueResult(resultRegs, node);
3939}
3940
3941void SpeculativeJIT::compilePutPrivateName(Node* node)
3942{
3943 ASSERT(node->child1().useKind() == UntypedUse);
3944 JSValueOperand base(this, node->child1());
3945 SpeculateCellOperand propertyValue(this, node->child2());
3946 JSValueOperand value(this, node->child3());
3947
3948 JSValueRegs valueRegs = value.jsValueRegs();
3949 JSValueRegs baseRegs = base.jsValueRegs();
3950
3951 GPRReg propertyGPR = propertyValue.gpr();
3952
3953 speculateSymbol(node->child2(), propertyGPR);
3954
3955 flushRegisters();
3956 callOperation(operationPutPrivateNameGeneric, TrustedImmPtr::weakPointer(m_graph, m_graph.globalObjectFor(node->origin.semantic)), baseRegs, CCallHelpers::CellValue(propertyGPR), valueRegs, TrustedImmPtr(nullptr), TrustedImm32(node->privateFieldPutKind().value()));
3957 m_jit.exceptionCheck();
3958
3959 noResult(node);
3960}
3961
3962void SpeculativeJIT::compilePutPrivateNameById(Node* node)
3963{
3964 std::optional<GPRTemporary> stubInfo;
3965 SpeculateCellOperand base(this, node->child1());
3966 JSValueOperand value(this, node->child2());
3967 GPRTemporary scratch(this);
3968
3969 GPRReg stubInfoGPR = InvalidGPRReg;
3970 if (JITCode::useDataIC(JITType::DFGJIT)) {
3971 stubInfo.emplace(this);
3972 stubInfoGPR = stubInfo->gpr();
3973 }
3974 JSValueRegs valueRegs = value.jsValueRegs();
3975 GPRReg baseGPR = base.gpr();
3976 GPRReg scratchGPR = scratch.gpr();
3977
3978 // We emit property check during DFG generation, so we don't need
3979 // to check it here.
3980 auto putKind = node->privateFieldPutKind().isDefine() ? PutKind::DirectPrivateFieldDefine : PutKind::DirectPrivateFieldSet;
3981 cachedPutById(node->origin.semantic, baseGPR, valueRegs, stubInfoGPR, scratchGPR, node->cacheableIdentifier(), putKind, ECMAMode::strict());
3982
3983 noResult(node);
3984}
3985
// Emits an inline cache for CheckPrivateBrand: verifies that the base (which may be
// untyped) carries the private brand identified by the symbol in child2. Non-cell
// bases, if not ruled out by type checks, go straight to the slow path, which calls
// operationCheckPrivateBrandOptimize.
void SpeculativeJIT::compileCheckPrivateBrand(Node* node)
{
    std::optional<GPRTemporary> stubInfo;
    JSValueOperand base(this, node->child1());
    SpeculateCellOperand brandValue(this, node->child2());

    // A stub-info register is only needed when DFG uses data ICs.
    GPRReg stubInfoGPR = InvalidGPRReg;
    if (JITCode::useDataIC(JITType::DFGJIT)) {
        stubInfo.emplace(this);
        stubInfoGPR = stubInfo->gpr();
    }
    JSValueRegs baseRegs = base.jsValueRegs();
    GPRReg brandGPR = brandValue.gpr();

    // Brands are symbols; OSR-exit otherwise.
    speculateSymbol(node->child2(), brandGPR);

    CodeOrigin codeOrigin = node->origin.semantic;
    CallSiteIndex callSite = m_jit.recordCallSiteAndGenerateExceptionHandlingOSRExitIfNeeded(codeOrigin, m_stream->size());
    RegisterSet usedRegisters = this->usedRegisters();

    JITCompiler::JumpList slowCases;
    // Only emit the cell check if the base has not already been proven to be a cell.
    if (needsTypeCheck(node->child1(), SpecCell))
        slowCases.append(m_jit.branchIfNotCell(baseRegs));

    JITPrivateBrandAccessGenerator gen(
        m_jit.codeBlock(), JITType::DFGJIT, codeOrigin, callSite, AccessType::CheckPrivateBrand, usedRegisters,
        baseRegs, JSValueRegs::payloadOnly(brandGPR), stubInfoGPR);

    gen.stubInfo()->propertyIsSymbol = true;
    gen.generateFastPath(m_jit);
    if (!JITCode::useDataIC(JITType::DFGJIT))
        slowCases.append(gen.slowPathJump());

    // Slow path: call through the data IC's slow-operation slot, or directly when
    // not using data ICs.
    std::unique_ptr<SlowPathGenerator> slowPath;
    if (JITCode::useDataIC(JITType::DFGJIT)) {
        slowPath = slowPathICCall(
            slowCases, this, gen.stubInfo(), stubInfoGPR, CCallHelpers::Address(stubInfoGPR, StructureStubInfo::offsetOfSlowOperation()), operationCheckPrivateBrandOptimize, NoResult,
            TrustedImmPtr::weakPointer(m_graph, m_graph.globalObjectFor(codeOrigin)), stubInfoGPR, baseRegs, CCallHelpers::CellValue(brandGPR));
    } else {
        slowPath = slowPathCall(
            slowCases, this, operationCheckPrivateBrandOptimize, NoResult,
            TrustedImmPtr::weakPointer(m_graph, m_graph.globalObjectFor(codeOrigin)), gen.stubInfo(), baseRegs, CCallHelpers::CellValue(brandGPR));
    }

    m_jit.addPrivateBrandAccess(gen, slowPath.get());
    addSlowPathGenerator(WTFMove(slowPath));

    noResult(node);
}
4035
// Emits an inline cache for SetPrivateBrand: installs the private brand (a symbol)
// on the base cell. Unlike CheckPrivateBrand, the base is always a proven cell here.
// The slow path calls operationSetPrivateBrandOptimize.
void SpeculativeJIT::compileSetPrivateBrand(Node* node)
{
    ASSERT(node->child1().useKind() == CellUse);
    std::optional<GPRTemporary> stubInfo;
    SpeculateCellOperand base(this, node->child1());
    SpeculateCellOperand brandValue(this, node->child2());

    // A stub-info register is only needed when DFG uses data ICs.
    GPRReg stubInfoGPR = InvalidGPRReg;
    if (JITCode::useDataIC(JITType::DFGJIT)) {
        stubInfo.emplace(this);
        stubInfoGPR = stubInfo->gpr();
    }
    GPRReg baseGPR = base.gpr();
    GPRReg brandGPR = brandValue.gpr();

    // Brands are symbols; OSR-exit otherwise.
    speculateSymbol(node->child2(), brandGPR);

    CodeOrigin codeOrigin = node->origin.semantic;
    CallSiteIndex callSite = m_jit.recordCallSiteAndGenerateExceptionHandlingOSRExitIfNeeded(codeOrigin, m_stream->size());
    RegisterSet usedRegisters = this->usedRegisters();

    JITCompiler::JumpList slowCases;
    JITPrivateBrandAccessGenerator gen(
        m_jit.codeBlock(), JITType::DFGJIT, codeOrigin, callSite, AccessType::SetPrivateBrand, usedRegisters,
        JSValueRegs::payloadOnly(baseGPR), JSValueRegs::payloadOnly(brandGPR), stubInfoGPR);

    gen.stubInfo()->propertyIsSymbol = true;
    gen.generateFastPath(m_jit);
    if (!JITCode::useDataIC(JITType::DFGJIT))
        slowCases.append(gen.slowPathJump());

    // Slow path: call through the data IC's slow-operation slot, or directly when
    // not using data ICs.
    std::unique_ptr<SlowPathGenerator> slowPath;
    if (JITCode::useDataIC(JITType::DFGJIT)) {
        slowPath = slowPathICCall(
            slowCases, this, gen.stubInfo(), stubInfoGPR, CCallHelpers::Address(stubInfoGPR, StructureStubInfo::offsetOfSlowOperation()), operationSetPrivateBrandOptimize, NoResult,
            TrustedImmPtr::weakPointer(m_graph, m_graph.globalObjectFor(codeOrigin)), stubInfoGPR, CCallHelpers::CellValue(baseGPR), CCallHelpers::CellValue(brandGPR));
    } else {
        slowPath = slowPathCall(
            slowCases, this, operationSetPrivateBrandOptimize, NoResult,
            TrustedImmPtr::weakPointer(m_graph, m_graph.globalObjectFor(codeOrigin)), gen.stubInfo(), CCallHelpers::CellValue(baseGPR), CCallHelpers::CellValue(brandGPR));
    }

    m_jit.addPrivateBrandAccess(gen, slowPath.get());
    addSlowPathGenerator(WTFMove(slowPath));

    noResult(node);
}
4083
4084void SpeculativeJIT::compileCheckTypeInfoFlags(Node* node)
4085{
4086 SpeculateCellOperand base(this, node->child1());
4087
4088 GPRReg baseGPR = base.gpr();
4089
4090 // FIXME: This only works for checking if a single bit is set. If we want to check more
4091 // than one bit at once, we'll need to fix this:
4092 // https://bugs.webkit.org/show_bug.cgi?id=185705
4093 speculationCheck(BadTypeInfoFlags, JSValueRegs(), nullptr, m_jit.branchTest8(MacroAssembler::Zero, MacroAssembler::Address(baseGPR, JSCell::typeInfoFlagsOffset()), MacroAssembler::TrustedImm32(node->typeInfoOperand())));
4094
4095 noResult(node);
4096}
4097
// Compiles ParseInt by dispatching to one of four runtime operations, chosen by
// two axes: whether a radix argument (child2) is present, and whether the value
// (child1) is speculated as a string or is untyped. All four paths are plain
// runtime calls; there is no inline fast path.
void SpeculativeJIT::compileParseInt(Node* node)
{
    RELEASE_ASSERT(node->child1().useKind() == UntypedUse || node->child1().useKind() == StringUse);
    if (node->child2()) {
        // Radix supplied: parseInt(value, radix).
        SpeculateInt32Operand radix(this, node->child2());
        GPRReg radixGPR = radix.gpr();
        if (node->child1().useKind() == UntypedUse) {
            JSValueOperand value(this, node->child1());
            JSValueRegs valueRegs = value.jsValueRegs();

            flushRegisters();
            JSValueRegsFlushedCallResult result(this);
            JSValueRegs resultRegs = result.regs();
            callOperation(operationParseIntGeneric, resultRegs, TrustedImmPtr::weakPointer(m_graph, m_graph.globalObjectFor(node->origin.semantic)), valueRegs, radixGPR);
            m_jit.exceptionCheck();
            jsValueResult(resultRegs, node);
            return;
        }

        // StringUse: speculate the value is a string cell before calling.
        SpeculateCellOperand value(this, node->child1());
        GPRReg valueGPR = value.gpr();
        speculateString(node->child1(), valueGPR);

        flushRegisters();
        JSValueRegsFlushedCallResult result(this);
        JSValueRegs resultRegs = result.regs();
        callOperation(operationParseIntString, resultRegs, TrustedImmPtr::weakPointer(m_graph, m_graph.globalObjectFor(node->origin.semantic)), valueGPR, radixGPR);
        m_jit.exceptionCheck();
        jsValueResult(resultRegs, node);
        return;
    }

    // No radix argument: parseInt(value).
    if (node->child1().useKind() == UntypedUse) {
        JSValueOperand value(this, node->child1());
        JSValueRegs valueRegs = value.jsValueRegs();

        flushRegisters();
        JSValueRegsFlushedCallResult result(this);
        JSValueRegs resultRegs = result.regs();
        callOperation(operationParseIntNoRadixGeneric, resultRegs, TrustedImmPtr::weakPointer(m_graph, m_graph.globalObjectFor(node->origin.semantic)), valueRegs);
        m_jit.exceptionCheck();
        jsValueResult(resultRegs, node);
        return;
    }

    // StringUse, no radix.
    SpeculateCellOperand value(this, node->child1());
    GPRReg valueGPR = value.gpr();
    speculateString(node->child1(), valueGPR);

    flushRegisters();
    JSValueRegsFlushedCallResult result(this);
    JSValueRegs resultRegs = result.regs();
    callOperation(operationParseIntStringNoRadix, resultRegs, TrustedImmPtr::weakPointer(m_graph, m_graph.globalObjectFor(node->origin.semantic)), valueGPR);
    m_jit.exceptionCheck();
    jsValueResult(resultRegs, node);
}
4154
// Computes whether `base instanceof` behavior is overridden: the result is true if
// the hasInstance value is not the default Symbol.hasInstance function, or if the
// base cell lacks the ImplementsDefaultHasInstance type-info flag.
void SpeculativeJIT::compileOverridesHasInstance(Node* node)
{
    Node* hasInstanceValueNode = node->child2().node();
    JSFunction* defaultHasInstanceFunction = jsCast<JSFunction*>(node->cellOperand()->value());

    MacroAssembler::JumpList notDefault;
    SpeculateCellOperand base(this, node->child1());
    JSValueOperand hasInstanceValue(this, node->child2());
    GPRTemporary result(this);

    GPRReg baseGPR = base.gpr();
    GPRReg resultGPR = result.gpr();

    // It would be great if constant folding handled automatically the case where we knew the hasInstance function
    // was a constant. Unfortunately, the folding rule for OverridesHasInstance is in the strength reduction phase
    // since it relies on OSR information. https://bugs.webkit.org/show_bug.cgi?id=154832
    if (!hasInstanceValueNode->isCellConstant() || defaultHasInstanceFunction != hasInstanceValueNode->asCell()) {
        // Not statically known to be the default function: compare at runtime.
        // On 32-bit, a non-cell value can never be the default function, so check
        // cell-ness first before comparing payloads.
        JSValueRegs hasInstanceValueRegs = hasInstanceValue.jsValueRegs();
#if USE(JSVALUE64)
        notDefault.append(m_jit.branchPtr(MacroAssembler::NotEqual, hasInstanceValueRegs.gpr(), TrustedImmPtr(node->cellOperand())));
#else
        notDefault.append(m_jit.branchIfNotCell(hasInstanceValueRegs));
        notDefault.append(m_jit.branchPtr(MacroAssembler::NotEqual, hasInstanceValueRegs.payloadGPR(), TrustedImmPtr(node->cellOperand())));
#endif
    }

    // Check that base 'ImplementsDefaultHasInstance'.
    // test8 with Zero: resultGPR becomes 1 (overridden) when the flag bit is clear.
    m_jit.test8(MacroAssembler::Zero, MacroAssembler::Address(baseGPR, JSCell::typeInfoFlagsOffset()), MacroAssembler::TrustedImm32(ImplementsDefaultHasInstance), resultGPR);
    MacroAssembler::Jump done = m_jit.jump();

    if (!notDefault.empty()) {
        // A custom hasInstance value always means the behavior is overridden.
        notDefault.link(&m_jit);
        m_jit.move(TrustedImm32(1), resultGPR);
    }

    done.link(&m_jit);
    unblessedBooleanResult(resultGPR, node);
}
4193
// Emits the inline-cached instanceof check for the case where value and prototype
// payloads are available in registers. Any caller-provided `slowCase` jump (e.g. a
// not-cell prototype check) is folded into the IC's slow cases, which call
// operationInstanceOfOptimize.
void SpeculativeJIT::compileInstanceOfForCells(Node* node, JSValueRegs valueRegs, JSValueRegs prototypeRegs, GPRReg resultGPR, GPRReg stubInfoGPR, GPRReg scratchGPR, GPRReg scratch2GPR, JITCompiler::Jump slowCase)
{
    CallSiteIndex callSiteIndex = m_jit.addCallSite(node->origin.semantic);

    // The last argument tells the generator whether the prototype is known to be
    // an object or a non-cell (per abstract interpretation of child2).
    JITInstanceOfGenerator gen(
        m_jit.codeBlock(), JITType::DFGJIT, node->origin.semantic, callSiteIndex, usedRegisters(), resultGPR,
        valueRegs.payloadGPR(), prototypeRegs.payloadGPR(), stubInfoGPR, scratchGPR, scratch2GPR,
        m_state.forNode(node->child2()).isType(SpecObject | ~SpecCell));
    gen.generateFastPath(m_jit);
    JITCompiler::JumpList slowCases;
    if (!JITCode::useDataIC(JITType::DFGJIT))
        slowCases.append(gen.slowPathJump());
    slowCases.append(slowCase);

    // Slow path: call through the data IC's slow-operation slot, or directly when
    // not using data ICs.
    std::unique_ptr<SlowPathGenerator> slowPath;
    if (JITCode::useDataIC(JITType::DFGJIT)) {
        slowPath = slowPathICCall(
            slowCases, this, gen.stubInfo(), stubInfoGPR, CCallHelpers::Address(stubInfoGPR, StructureStubInfo::offsetOfSlowOperation()), operationInstanceOfOptimize, resultGPR, TrustedImmPtr::weakPointer(m_graph, m_graph.globalObjectFor(node->origin.semantic)), stubInfoGPR, valueRegs, prototypeRegs);
    } else {
        slowPath = slowPathCall(
            slowCases, this, operationInstanceOfOptimize, resultGPR, TrustedImmPtr::weakPointer(m_graph, m_graph.globalObjectFor(node->origin.semantic)), gen.stubInfo(), valueRegs, prototypeRegs);
    }

    m_jit.addInstanceOf(gen, slowPath.get());
    addSlowPathGenerator(WTFMove(slowPath));
}
4220
4221void SpeculativeJIT::compileInstanceOf(Node* node)
4222{
4223#if USE(JSVALUE64)
4224 if (node->child1().useKind() == CellUse
4225 && node->child2().useKind() == CellUse) {
4226 std::optional<GPRTemporary> stubInfo;
4227 SpeculateCellOperand value(this, node->child1());
4228 SpeculateCellOperand prototype(this, node->child2());
4229
4230 GPRTemporary result(this);
4231 GPRTemporary scratch(this);
4232 GPRTemporary scratch2(this);
4233
4234 GPRReg stubInfoGPR = InvalidGPRReg;
4235 if (JITCode::useDataIC(JITType::DFGJIT)) {
4236 stubInfo.emplace(this);
4237 stubInfoGPR = stubInfo->gpr();
4238 }
4239 GPRReg valueGPR = value.gpr();
4240 GPRReg prototypeGPR = prototype.gpr();
4241 GPRReg resultGPR = result.gpr();
4242 GPRReg scratchGPR = scratch.gpr();
4243 GPRReg scratch2GPR = scratch2.gpr();
4244
4245 compileInstanceOfForCells(node, JSValueRegs(valueGPR), JSValueRegs(prototypeGPR), resultGPR, stubInfoGPR, scratchGPR, scratch2GPR);
4246
4247 blessedBooleanResult(resultGPR, node);
4248 return;
4249 }
4250#endif
4251
4252 DFG_ASSERT(m_jit.graph(), node, node->child1().useKind() == UntypedUse);
4253 DFG_ASSERT(m_jit.graph(), node, node->child2().useKind() == UntypedUse);
4254
4255 std::optional<GPRTemporary> stubInfo;
4256 JSValueOperand value(this, node->child1());
4257 JSValueOperand prototype(this, node->child2());
4258
4259 GPRTemporary result(this);
4260 GPRTemporary scratch(this);
4261
4262 GPRReg stubInfoGPR = InvalidGPRReg;
4263 if (JITCode::useDataIC(JITType::DFGJIT)) {
4264 stubInfo.emplace(this);
4265 stubInfoGPR = stubInfo->gpr();
4266 }
4267 JSValueRegs valueRegs = value.jsValueRegs();
4268 JSValueRegs prototypeRegs = prototype.jsValueRegs();
4269
4270 GPRReg resultGPR = result.gpr();
4271 GPRReg scratchGPR = scratch.gpr();
4272
4273 JITCompiler::Jump isCell = m_jit.branchIfCell(valueRegs);
4274 moveFalseTo(resultGPR);
4275
4276 JITCompiler::Jump done = m_jit.jump();
4277
4278 isCell.link(&m_jit);
4279
4280 JITCompiler::Jump slowCase = m_jit.branchIfNotCell(prototypeRegs);
4281
4282 compileInstanceOfForCells(node, valueRegs, prototypeRegs, resultGPR, stubInfoGPR, scratchGPR, InvalidGPRReg, slowCase);
4283
4284 done.link(&m_jit);
4285 blessedBooleanResult(resultGPR, node);
4286 return;
4287}
4288
// Compiles ValueBitNot (~x for BigInt-capable operands). BigInt32 is handled inline
// with a bit trick; HeapBigInt calls a dedicated runtime operation; everything else
// (Untyped / AnyBigInt) takes the generic runtime path.
void SpeculativeJIT::compileValueBitNot(Node* node)
{
    Edge& child1 = node->child1();

#if USE(BIGINT32)
    if (child1.useKind() == BigInt32Use) {
        SpeculateBigInt32Operand operand(this, child1);
        GPRTemporary result(this);
        GPRReg resultGPR = result.gpr();

        // The following trick relies on details of the representation of BigInt32, and will have to be updated if we move bits around.
        static_assert(JSValue::BigInt32Tag == 0x12);
        static_assert(JSValue::BigInt32Mask == static_cast<int64_t>(0xfffe000000000012));
        constexpr uint64_t maskForBigInt32Bits = 0x0000ffffffff0000;
        static_assert(!(JSValue::BigInt32Mask & maskForBigInt32Bits));
        // XOR with the payload mask flips exactly the 32 payload bits (bits 16-47),
        // computing bitwise-not of the payload while leaving the tag bits intact.
        m_jit.move(TrustedImm64(maskForBigInt32Bits), resultGPR);
        m_jit.xor64(operand.gpr(), resultGPR);

        jsValueResult(resultGPR, node);

        return;
    }
    // FIXME: add support for mixed BigInt32 / HeapBigInt
#endif

    if (child1.useKind() == HeapBigIntUse) {
        SpeculateCellOperand operand(this, child1);
        GPRReg operandGPR = operand.gpr();

        speculateHeapBigInt(child1, operandGPR);

        flushRegisters();
        JSValueRegsFlushedCallResult result(this);
        JSValueRegs resultRegs = result.regs();

        callOperation(operationBitNotHeapBigInt, resultRegs, TrustedImmPtr::weakPointer(m_graph, m_graph.globalObjectFor(node->origin.semantic)), operandGPR);
        m_jit.exceptionCheck();
        jsValueResult(resultRegs, node);

        return;
    }

    ASSERT(child1.useKind() == UntypedUse || child1.useKind() == AnyBigIntUse);
    JSValueOperand operand(this, child1, ManualOperandSpeculation);
    speculate(node, child1); // Required for the AnyBigIntUse case
    JSValueRegs operandRegs = operand.jsValueRegs();

    flushRegisters();
    JSValueRegsFlushedCallResult result(this);
    JSValueRegs resultRegs = result.regs();
    callOperation(operationValueBitNot, resultRegs, TrustedImmPtr::weakPointer(m_graph, m_graph.globalObjectFor(node->origin.semantic)), operandRegs);
    m_jit.exceptionCheck();

    jsValueResult(resultRegs, node);
}
4344
// Emits code for bitwise NOT when child1 is speculated to be an Int32.
// Produces a strict Int32 result (no boxing).
void SpeculativeJIT::compileBitwiseNot(Node* node)
{
    Edge& child1 = node->child1();

    SpeculateInt32Operand operand(this, child1);
    GPRTemporary result(this);
    GPRReg resultGPR = result.gpr();

    // Copy into the result register first, then invert in place, leaving the
    // operand's register unmodified.
    m_jit.move(operand.gpr(), resultGPR);

    m_jit.not32(resultGPR);

    strictInt32Result(resultGPR, node);
}
4359
// Shared emitter for binary bit operations (and left-shift) over operands that are not
// statically known to be Int32: UntypedUse, AnyBigIntUse, HeapBigIntUse, or BigInt32Use
// (asserted below). SnippetGenerator provides an inline fast path; snippetSlowPathFunction
// is the generic C++ fallback.
//
// Two code shapes:
//   1. If either operand is known not to be a number, the inline fast path cannot help,
//      so we go straight to the slow-path call.
//   2. Otherwise, emit the snippet's fast path, with the slow path behind a silent
//      spill/fill of live registers.
template<typename SnippetGenerator, J_JITOperation_GJJ snippetSlowPathFunction>
void SpeculativeJIT::emitUntypedOrAnyBigIntBitOp(Node* node)
{
    Edge& leftChild = node->child1();
    Edge& rightChild = node->child2();

    DFG_ASSERT(m_jit.graph(), node, node->isBinaryUseKind(UntypedUse) || node->isBinaryUseKind(AnyBigIntUse) || node->isBinaryUseKind(HeapBigIntUse) || node->isBinaryUseKind(BigInt32Use));

    if (isKnownNotNumber(leftChild.node()) || isKnownNotNumber(rightChild.node())) {
        JSValueOperand left(this, leftChild, ManualOperandSpeculation);
        JSValueOperand right(this, rightChild, ManualOperandSpeculation);
        speculate(node, leftChild);
        speculate(node, rightChild);
        JSValueRegs leftRegs = left.jsValueRegs();
        JSValueRegs rightRegs = right.jsValueRegs();

        flushRegisters();
        JSValueRegsFlushedCallResult result(this);
        JSValueRegs resultRegs = result.regs();
        callOperation(snippetSlowPathFunction, resultRegs, TrustedImmPtr::weakPointer(m_graph, m_graph.globalObjectFor(node->origin.semantic)), leftRegs, rightRegs);
        m_jit.exceptionCheck();

        jsValueResult(resultRegs, node);
        return;
    }

    // Operands are emplaced lazily below because a constant operand is not loaded into
    // registers at all on the fast path.
    std::optional<JSValueOperand> left;
    std::optional<JSValueOperand> right;

    JSValueRegs leftRegs;
    JSValueRegs rightRegs;

#if USE(JSVALUE64)
    GPRTemporary result(this);
    JSValueRegs resultRegs = JSValueRegs(result.gpr());
    GPRTemporary scratch(this);
    GPRReg scratchGPR = scratch.gpr();
#else
    // On 32-bit, the result tag register doubles as the scratch to save a temporary.
    GPRTemporary resultTag(this);
    GPRTemporary resultPayload(this);
    JSValueRegs resultRegs = JSValueRegs(resultPayload.gpr(), resultTag.gpr());
    GPRReg scratchGPR = resultTag.gpr();
#endif

    SnippetOperand leftOperand;
    SnippetOperand rightOperand;

    // The snippet generator does not support both operands being constant. If the left
    // operand is already const, we'll ignore the right operand's constness.
    if (leftChild->isInt32Constant())
        leftOperand.setConstInt32(leftChild->asInt32());
    else if (rightChild->isInt32Constant())
        rightOperand.setConstInt32(rightChild->asInt32());

    RELEASE_ASSERT(!leftOperand.isConst() || !rightOperand.isConst());

    if (!leftOperand.isConst()) {
        left.emplace(this, leftChild, ManualOperandSpeculation);
        speculate(node, leftChild); // Required for AnyBigIntUse
        leftRegs = left->jsValueRegs();
    }
    if (!rightOperand.isConst()) {
        right.emplace(this, rightChild, ManualOperandSpeculation);
        speculate(node, rightChild); // Required for AnyBigIntUse
        rightRegs = right->jsValueRegs();
    }

    SnippetGenerator gen(leftOperand, rightOperand, resultRegs, leftRegs, rightRegs, scratchGPR);
    gen.generateFastPath(m_jit);

    ASSERT(gen.didEmitFastPath());
    gen.endJumpList().append(m_jit.jump());

    gen.slowPathJumpList().link(&m_jit);
    silentSpillAllRegisters(resultRegs);

    // A constant operand was never loaded into registers; materialize it into the
    // (now free) result registers for the slow-path call.
    if (leftOperand.isConst()) {
        leftRegs = resultRegs;
        m_jit.moveValue(leftChild->asJSValue(), leftRegs);
    } else if (rightOperand.isConst()) {
        rightRegs = resultRegs;
        m_jit.moveValue(rightChild->asJSValue(), rightRegs);
    }

    callOperation(snippetSlowPathFunction, resultRegs, TrustedImmPtr::weakPointer(m_graph, m_graph.globalObjectFor(node->origin.semantic)), leftRegs, rightRegs);

    silentFillAllRegisters();
    m_jit.exceptionCheck();

    gen.endJumpList().link(&m_jit);
    jsValueResult(resultRegs, node);
}
4452
// Emits code for ValueBitAnd/ValueBitOr/ValueBitXor on arbitrary JSValues.
// Dispatch, in order:
//   - both operands BigInt32 (BIGINT32 builds): fully inline 64-bit register ops.
//   - both operands HeapBigInt: speculate and call the matching operation*HeapBigInt.
//   - otherwise: the generic snippet-based emitter (emitUntypedOrAnyBigIntBitOp).
void SpeculativeJIT::compileValueBitwiseOp(Node* node)
{
    NodeType op = node->op();
    Edge& leftChild = node->child1();
    Edge& rightChild = node->child2();

#if USE(BIGINT32)
    if (leftChild.useKind() == BigInt32Use && rightChild.useKind() == BigInt32Use) {
        SpeculateBigInt32Operand left(this, leftChild);
        SpeculateBigInt32Operand right(this, rightChild);
        GPRTemporary result(this);
        GPRReg resultGPR = result.gpr();

        m_jit.move(left.gpr(), resultGPR);

        switch (op) {
        case ValueBitAnd:
            // No need to unbox/box: bitAnd does not interfere with the encoding of BigInt32
            m_jit.and64(right.gpr(), resultGPR);
            break;
        case ValueBitOr:
            // No need to unbox/box: bitOr does not interfere with the encoding of BigInt32
            m_jit.or64(right.gpr(), resultGPR);
            break;
        case ValueBitXor:
            // BitXor removes the tag, so we must add it back after doing the operation
            m_jit.xor64(right.gpr(), resultGPR);
            m_jit.or64(TrustedImm32(JSValue::BigInt32Tag), resultGPR);
            break;
        default:
            RELEASE_ASSERT_NOT_REACHED();
        }

        jsValueResult(resultGPR, node);
        return;
    }
    // FIXME: add support for mixed BigInt32 / HeapBigInt
#endif

    if (node->isBinaryUseKind(HeapBigIntUse)) {
        SpeculateCellOperand left(this, node->child1());
        SpeculateCellOperand right(this, node->child2());
        GPRReg leftGPR = left.gpr();
        GPRReg rightGPR = right.gpr();

        speculateHeapBigInt(leftChild, leftGPR);
        speculateHeapBigInt(rightChild, rightGPR);

        flushRegisters();
        JSValueRegsFlushedCallResult result(this);
        JSValueRegs resultRegs = result.regs();

        switch (op) {
        case ValueBitAnd:
            callOperation(operationBitAndHeapBigInt, resultRegs, TrustedImmPtr::weakPointer(m_graph, m_graph.globalObjectFor(node->origin.semantic)), leftGPR, rightGPR);
            break;
        case ValueBitXor:
            callOperation(operationBitXorHeapBigInt, resultRegs, TrustedImmPtr::weakPointer(m_graph, m_graph.globalObjectFor(node->origin.semantic)), leftGPR, rightGPR);
            break;
        case ValueBitOr:
            callOperation(operationBitOrHeapBigInt, resultRegs, TrustedImmPtr::weakPointer(m_graph, m_graph.globalObjectFor(node->origin.semantic)), leftGPR, rightGPR);
            break;
        default:
            RELEASE_ASSERT_NOT_REACHED();
        }

        m_jit.exceptionCheck();
        jsValueResult(resultRegs, node);
        return;
    }

    // Generic case: pick the snippet generator and slow-path operation matching the op.
    switch (op) {
    case ValueBitAnd:
        emitUntypedOrAnyBigIntBitOp<JITBitAndGenerator, operationValueBitAnd>(node);
        return;
    case ValueBitXor:
        emitUntypedOrAnyBigIntBitOp<JITBitXorGenerator, operationValueBitXor>(node);
        return;
    case ValueBitOr:
        emitUntypedOrAnyBigIntBitOp<JITBitOrGenerator, operationValueBitOr>(node);
        return;
    default:
        RELEASE_ASSERT_NOT_REACHED();
    }
}
4538
// Emits code for a binary bit operation where both children are speculated Int32.
// If either side is an Int32 constant it is folded into the bitOp as an immediate;
// otherwise both operands are materialized in registers. Result is a strict Int32.
void SpeculativeJIT::compileBitwiseOp(Node* node)
{
    NodeType op = node->op();
    Edge& leftChild = node->child1();
    Edge& rightChild = node->child2();

    if (leftChild->isInt32Constant()) {
        SpeculateInt32Operand op2(this, rightChild);
        // Reuse the operand's register for the result when possible.
        GPRTemporary result(this, Reuse, op2);

        bitOp(op, leftChild->asInt32(), op2.gpr(), result.gpr());

        strictInt32Result(result.gpr(), node);
        return;
    }

    if (rightChild->isInt32Constant()) {
        SpeculateInt32Operand op1(this, leftChild);
        GPRTemporary result(this, Reuse, op1);

        bitOp(op, rightChild->asInt32(), op1.gpr(), result.gpr());

        strictInt32Result(result.gpr(), node);
        return;
    }

    SpeculateInt32Operand op1(this, leftChild);
    SpeculateInt32Operand op2(this, rightChild);
    GPRTemporary result(this, Reuse, op1, op2);

    GPRReg reg1 = op1.gpr();
    GPRReg reg2 = op2.gpr();
    bitOp(op, reg1, reg2, result.gpr());

    strictInt32Result(result.gpr(), node);
}
4575
// Emits code for signed (ValueBitRShift) or unsigned right shift over operands that are
// not statically known Int32s. Picks the slow-path operation and the snippet shift type
// from the node's op, then mirrors the shape of emitUntypedOrAnyBigIntBitOp:
//   - known-not-number operands, or BigInt32/AnyBigInt use kinds, go straight to the call;
//   - otherwise a JITRightShiftGenerator fast path is emitted with a spill/fill slow path.
void SpeculativeJIT::emitUntypedOrBigIntRightShiftBitOp(Node* node)
{
    J_JITOperation_GJJ snippetSlowPathFunction = node->op() == ValueBitRShift
        ? operationValueBitRShift : operationValueBitURShift;
    JITRightShiftGenerator::ShiftType shiftType = node->op() == ValueBitRShift
        ? JITRightShiftGenerator::SignedShift : JITRightShiftGenerator::UnsignedShift;

    Edge& leftChild = node->child1();
    Edge& rightChild = node->child2();

    if (isKnownNotNumber(leftChild.node()) || isKnownNotNumber(rightChild.node()) || node->isBinaryUseKind(BigInt32Use) || node->isBinaryUseKind(AnyBigIntUse)) {
        JSValueOperand left(this, leftChild, ManualOperandSpeculation);
        JSValueOperand right(this, rightChild, ManualOperandSpeculation);
        speculate(node, leftChild);
        speculate(node, rightChild);
        JSValueRegs leftRegs = left.jsValueRegs();
        JSValueRegs rightRegs = right.jsValueRegs();

        flushRegisters();
        JSValueRegsFlushedCallResult result(this);
        JSValueRegs resultRegs = result.regs();
        callOperation(snippetSlowPathFunction, resultRegs, TrustedImmPtr::weakPointer(m_graph, m_graph.globalObjectFor(node->origin.semantic)), leftRegs, rightRegs);
        m_jit.exceptionCheck();

        jsValueResult(resultRegs, node);
        return;
    }

    // Operands are emplaced lazily: a constant operand is not loaded on the fast path.
    std::optional<JSValueOperand> left;
    std::optional<JSValueOperand> right;

    JSValueRegs leftRegs;
    JSValueRegs rightRegs;

    // FPR temporary used by the snippet for the double (non-int32 number) case.
    FPRTemporary leftNumber(this);
    FPRReg leftFPR = leftNumber.fpr();

#if USE(JSVALUE64)
    GPRTemporary result(this);
    JSValueRegs resultRegs = JSValueRegs(result.gpr());
    GPRTemporary scratch(this);
    GPRReg scratchGPR = scratch.gpr();
    FPRReg scratchFPR = InvalidFPRReg;
#else
    // On 32-bit, the result tag register doubles as the GPR scratch; a dedicated FPR
    // scratch is needed instead.
    GPRTemporary resultTag(this);
    GPRTemporary resultPayload(this);
    JSValueRegs resultRegs = JSValueRegs(resultPayload.gpr(), resultTag.gpr());
    GPRReg scratchGPR = resultTag.gpr();
    FPRTemporary fprScratch(this);
    FPRReg scratchFPR = fprScratch.fpr();
#endif

    SnippetOperand leftOperand;
    SnippetOperand rightOperand;

    // The snippet generator does not support both operands being constant. If the left
    // operand is already const, we'll ignore the right operand's constness.
    if (leftChild->isInt32Constant())
        leftOperand.setConstInt32(leftChild->asInt32());
    else if (rightChild->isInt32Constant())
        rightOperand.setConstInt32(rightChild->asInt32());

    RELEASE_ASSERT(!leftOperand.isConst() || !rightOperand.isConst());

    if (!leftOperand.isConst()) {
        left.emplace(this, leftChild);
        leftRegs = left->jsValueRegs();
    }
    if (!rightOperand.isConst()) {
        right.emplace(this, rightChild);
        rightRegs = right->jsValueRegs();
    }

    JITRightShiftGenerator gen(leftOperand, rightOperand, resultRegs, leftRegs, rightRegs,
        leftFPR, scratchGPR, scratchFPR, shiftType);
    gen.generateFastPath(m_jit);

    ASSERT(gen.didEmitFastPath());
    gen.endJumpList().append(m_jit.jump());

    gen.slowPathJumpList().link(&m_jit);
    silentSpillAllRegisters(resultRegs);

    // A constant operand was never loaded into registers; materialize it into the
    // (now free) result registers for the slow-path call.
    if (leftOperand.isConst()) {
        leftRegs = resultRegs;
        m_jit.moveValue(leftChild->asJSValue(), leftRegs);
    } else if (rightOperand.isConst()) {
        rightRegs = resultRegs;
        m_jit.moveValue(rightChild->asJSValue(), rightRegs);
    }

    callOperation(snippetSlowPathFunction, resultRegs, TrustedImmPtr::weakPointer(m_graph, m_graph.globalObjectFor(node->origin.semantic)), leftRegs, rightRegs);

    silentFillAllRegisters();
    m_jit.exceptionCheck();

    gen.endJumpList().link(&m_jit);
    jsValueResult(resultRegs, node);
    return;
}
4676
// Emits code for ValueBitLShift. HeapBigInt operands call out to
// operationBitLShiftHeapBigInt; everything else falls through to the generic
// snippet-based emitter with operationValueBitLShift as the slow path.
void SpeculativeJIT::compileValueLShiftOp(Node* node)
{
    Edge& leftChild = node->child1();
    Edge& rightChild = node->child2();

    // FIXME: support BigInt32
    if (node->binaryUseKind() == HeapBigIntUse) {
        SpeculateCellOperand left(this, leftChild);
        SpeculateCellOperand right(this, rightChild);
        GPRReg leftGPR = left.gpr();
        GPRReg rightGPR = right.gpr();

        speculateHeapBigInt(leftChild, leftGPR);
        speculateHeapBigInt(rightChild, rightGPR);

        flushRegisters();
        JSValueRegsFlushedCallResult result(this);
        JSValueRegs resultRegs = result.regs();

        callOperation(operationBitLShiftHeapBigInt, resultRegs, TrustedImmPtr::weakPointer(m_graph, m_graph.globalObjectFor(node->origin.semantic)), leftGPR, rightGPR);
        m_jit.exceptionCheck();
        jsValueResult(resultRegs, node);
        return;
    }

    emitUntypedOrAnyBigIntBitOp<JITLeftShiftGenerator, operationValueBitLShift>(node);
}
4704
// Emits code for ValueBitRShift (signed right shift on arbitrary values).
// HeapBigInt operands call out to operationBitRShiftHeapBigInt; everything else
// goes through the shared right-shift emitter.
void SpeculativeJIT::compileValueBitRShift(Node* node)
{
    Edge& leftChild = node->child1();
    Edge& rightChild = node->child2();

    // FIXME: support BigInt32
    if (node->isBinaryUseKind(HeapBigIntUse)) {
        SpeculateCellOperand left(this, leftChild);
        SpeculateCellOperand right(this, rightChild);
        GPRReg leftGPR = left.gpr();
        GPRReg rightGPR = right.gpr();

        speculateHeapBigInt(leftChild, leftGPR);
        speculateHeapBigInt(rightChild, rightGPR);

        flushRegisters();
        JSValueRegsFlushedCallResult result(this);
        JSValueRegs resultRegs = result.regs();
        callOperation(operationBitRShiftHeapBigInt, resultRegs, TrustedImmPtr::weakPointer(m_graph, m_graph.globalObjectFor(node->origin.semantic)), leftGPR, rightGPR);
        m_jit.exceptionCheck();

        jsValueResult(resultRegs, node);
        return;
    }

    emitUntypedOrBigIntRightShiftBitOp(node);
}
4732
// Emits code for Int32-speculated shift nodes. Untyped operands are only expected
// for BitURShift (asserted) and are delegated to the generic right-shift emitter.
// A constant shift amount is folded as an immediate (masked to 0..31, matching
// JS shift semantics); otherwise both operands live in registers.
void SpeculativeJIT::compileShiftOp(Node* node)
{
    NodeType op = node->op();
    Edge& leftChild = node->child1();
    Edge& rightChild = node->child2();

    if (leftChild.useKind() == UntypedUse || rightChild.useKind() == UntypedUse) {
        RELEASE_ASSERT(op == BitURShift);
        emitUntypedOrBigIntRightShiftBitOp(node);
        return;
    }

    if (rightChild->isInt32Constant()) {
        SpeculateInt32Operand op1(this, leftChild);
        GPRTemporary result(this, Reuse, op1);

        // Only the low 5 bits of the shift amount are meaningful for 32-bit shifts.
        shiftOp(op, op1.gpr(), rightChild->asInt32() & 0x1f, result.gpr());

        strictInt32Result(result.gpr(), node);
    } else {
        // Do not allow shift amount to be used as the result, MacroAssembler does not permit this.
        SpeculateInt32Operand op1(this, leftChild);
        SpeculateInt32Operand op2(this, rightChild);
        GPRTemporary result(this, Reuse, op1);

        GPRReg reg1 = op1.gpr();
        GPRReg reg2 = op2.gpr();
        shiftOp(op, reg1, reg2, result.gpr());

        strictInt32Result(result.gpr(), node);
    }
}
4765
// Emits code for ValueAdd ("+" on arbitrary JSValues). Dispatch, in order:
//   - both operands BigInt32 (BIGINT32 builds): inline unbox/add32 with an overflow
//     speculation check, then rebox.
//   - both operands AnyBigInt (BIGINT32 builds): call operationValueAddNotNumber.
//   - both operands HeapBigInt: speculate and call operationAddHeapBigInt.
//   - either operand known not a number: call operationValueAddNotNumber.
//   - otherwise: a JITAddIC math inline cache via compileMathIC.
void SpeculativeJIT::compileValueAdd(Node* node)
{
    Edge& leftChild = node->child1();
    Edge& rightChild = node->child2();

#if USE(BIGINT32)
    // FIXME: Introduce another BigInt32 code generation: binary use kinds are BigIntUse32, but result is SpecAnyInt and accepting overflow.
    // Let's distinguish these modes based on result type information by introducing NodeResultBigInt32.
    // https://bugs.webkit.org/show_bug.cgi?id=210957
    // https://bugs.webkit.org/show_bug.cgi?id=211040
    if (node->isBinaryUseKind(BigInt32Use)) {
        SpeculateBigInt32Operand left(this, leftChild);
        SpeculateBigInt32Operand right(this, rightChild);
        GPRTemporary result(this);
        GPRTemporary temp(this);

        GPRReg leftGPR = left.gpr();
        GPRReg rightGPR = right.gpr();
        GPRReg resultGPR = result.gpr();
        GPRReg tempGPR = temp.gpr();

        m_jit.unboxBigInt32(leftGPR, resultGPR);
        m_jit.unboxBigInt32(rightGPR, tempGPR);

        // If the 32-bit add overflows, the result no longer fits a BigInt32: OSR exit.
        MacroAssembler::Jump check = m_jit.branchAdd32(MacroAssembler::Overflow, resultGPR, tempGPR, resultGPR);

        speculationCheck(BigInt32Overflow, JSValueRegs(), nullptr, check);

        m_jit.boxBigInt32(resultGPR);
        jsValueResult(resultGPR, node);
        return;
    }

    if (node->isBinaryUseKind(AnyBigIntUse)) {
        JSValueOperand left(this, leftChild, ManualOperandSpeculation);
        JSValueOperand right(this, rightChild, ManualOperandSpeculation);
        speculate(node, leftChild);
        speculate(node, rightChild);
        JSValueRegs leftRegs = left.jsValueRegs();
        JSValueRegs rightRegs = right.jsValueRegs();

        flushRegisters();
        JSValueRegsFlushedCallResult result(this);
        JSValueRegs resultRegs = result.regs();
        // FIXME: call a more specialized function
        callOperation(operationValueAddNotNumber, resultRegs, TrustedImmPtr::weakPointer(m_graph, m_graph.globalObjectFor(node->origin.semantic)), leftRegs, rightRegs);
        m_jit.exceptionCheck();

        jsValueResult(resultRegs, node);
        return;
    }
    // FIXME: add support for mixed BigInt32/HeapBigInt
#endif // USE(BIGINT32)

    if (node->isBinaryUseKind(HeapBigIntUse)) {
        SpeculateCellOperand left(this, leftChild);
        SpeculateCellOperand right(this, rightChild);
        GPRReg leftGPR = left.gpr();
        GPRReg rightGPR = right.gpr();

        speculateHeapBigInt(leftChild, leftGPR);
        speculateHeapBigInt(rightChild, rightGPR);

        flushRegisters();
        JSValueRegsFlushedCallResult result(this);
        JSValueRegs resultRegs = result.regs();
        callOperation(operationAddHeapBigInt, resultRegs, TrustedImmPtr::weakPointer(m_graph, m_graph.globalObjectFor(node->origin.semantic)), leftGPR, rightGPR);
        m_jit.exceptionCheck();

        jsValueResult(resultRegs, node);
        return;
    }

    if (isKnownNotNumber(leftChild.node()) || isKnownNotNumber(rightChild.node())) {
        JSValueOperand left(this, leftChild);
        JSValueOperand right(this, rightChild);
        JSValueRegs leftRegs = left.jsValueRegs();
        JSValueRegs rightRegs = right.jsValueRegs();

        flushRegisters();
        JSValueRegsFlushedCallResult result(this);
        JSValueRegs resultRegs = result.regs();
        callOperation(operationValueAddNotNumber, resultRegs, TrustedImmPtr::weakPointer(m_graph, m_graph.globalObjectFor(node->origin.semantic)), leftRegs, rightRegs);
        m_jit.exceptionCheck();

        jsValueResult(resultRegs, node);
        return;
    }

    // On 32-bit platforms the math IC additionally needs an FPR scratch.
#if USE(JSVALUE64)
    bool needsScratchGPRReg = true;
    bool needsScratchFPRReg = false;
#else
    bool needsScratchGPRReg = true;
    bool needsScratchFPRReg = true;
#endif

    // The math IC is keyed off the baseline code block's arith profile for this bytecode.
    CodeBlock* baselineCodeBlock = m_jit.graph().baselineCodeBlockFor(node->origin.semantic);
    BytecodeIndex bytecodeIndex = node->origin.semantic.bytecodeIndex();
    BinaryArithProfile* arithProfile = baselineCodeBlock->binaryArithProfileForBytecodeIndex(bytecodeIndex);
    JITAddIC* addIC = m_jit.codeBlock()->addJITAddIC(arithProfile);
    auto repatchingFunction = operationValueAddOptimize;
    auto nonRepatchingFunction = operationValueAdd;

    compileMathIC(node, addIC, needsScratchGPRReg, needsScratchFPRReg, repatchingFunction, nonRepatchingFunction);
}
4872
// Emits code for ValueSub ("-" on arbitrary JSValues). Mirrors compileValueAdd:
//   - both operands BigInt32 (BIGINT32 builds): inline unbox/sub32 with an overflow
//     speculation check, then rebox.
//   - both operands AnyBigInt (BIGINT32 builds): call the generic operationValueSub.
//   - both operands HeapBigInt: speculate and call operationSubHeapBigInt.
//   - otherwise: a JITSubIC math inline cache via compileMathIC.
void SpeculativeJIT::compileValueSub(Node* node)
{
    Edge& leftChild = node->child1();
    Edge& rightChild = node->child2();

#if USE(BIGINT32)
    // FIXME: Introduce another BigInt32 code generation: binary use kinds are BigIntUse32, but result is SpecAnyInt and accepting overflow.
    // Let's distinguish these modes based on result type information by introducing NodeResultBigInt32.
    // https://bugs.webkit.org/show_bug.cgi?id=210957
    // https://bugs.webkit.org/show_bug.cgi?id=211040
    if (node->binaryUseKind() == BigInt32Use) {
        SpeculateBigInt32Operand left(this, node->child1());
        SpeculateBigInt32Operand right(this, node->child2());
        GPRTemporary result(this);
        GPRTemporary temp(this);

        GPRReg leftGPR = left.gpr();
        GPRReg rightGPR = right.gpr();
        GPRReg resultGPR = result.gpr();
        GPRReg tempGPR = temp.gpr();

        m_jit.unboxBigInt32(leftGPR, resultGPR);
        m_jit.unboxBigInt32(rightGPR, tempGPR);

        // If the 32-bit subtract overflows, the result no longer fits a BigInt32: OSR exit.
        MacroAssembler::Jump check = m_jit.branchSub32(MacroAssembler::Overflow, resultGPR, tempGPR, resultGPR);

        speculationCheck(BigInt32Overflow, JSValueRegs(), nullptr, check);

        m_jit.boxBigInt32(resultGPR);
        jsValueResult(resultGPR, node);
        return;
    }
    // FIXME: add support for mixed BigInt32/HeapBigInt

    // FIXME: why do compileValueAdd/compileValueMul use isKnownNotNumber but not ValueSub?
    if (node->binaryUseKind() == AnyBigIntUse) {
        JSValueOperand left(this, leftChild, ManualOperandSpeculation);
        JSValueOperand right(this, rightChild, ManualOperandSpeculation);
        speculateAnyBigInt(leftChild);
        speculateAnyBigInt(rightChild);
        JSValueRegs leftRegs = left.jsValueRegs();
        JSValueRegs rightRegs = right.jsValueRegs();

        flushRegisters();
        JSValueRegsFlushedCallResult result(this);
        JSValueRegs resultRegs = result.regs();
        callOperation(operationValueSub, resultRegs, TrustedImmPtr::weakPointer(m_graph, m_graph.globalObjectFor(node->origin.semantic)), leftRegs, rightRegs);
        m_jit.exceptionCheck();

        jsValueResult(resultRegs, node);
        return;
    }
#endif // USE(BIGINT32)

    if (node->binaryUseKind() == HeapBigIntUse) {
        SpeculateCellOperand left(this, node->child1());
        SpeculateCellOperand right(this, node->child2());
        GPRReg leftGPR = left.gpr();
        GPRReg rightGPR = right.gpr();

        speculateHeapBigInt(leftChild, leftGPR);
        speculateHeapBigInt(rightChild, rightGPR);

        flushRegisters();
        JSValueRegsFlushedCallResult result(this);
        JSValueRegs resultRegs = result.regs();

        callOperation(operationSubHeapBigInt, resultRegs, TrustedImmPtr::weakPointer(m_graph, m_graph.globalObjectFor(node->origin.semantic)), leftGPR, rightGPR);

        m_jit.exceptionCheck();
        jsValueResult(resultRegs, node);
        return;
    }

    // On 32-bit platforms the math IC additionally needs an FPR scratch.
#if USE(JSVALUE64)
    bool needsScratchGPRReg = true;
    bool needsScratchFPRReg = false;
#else
    bool needsScratchGPRReg = true;
    bool needsScratchFPRReg = true;
#endif

    // The math IC is keyed off the baseline code block's arith profile for this bytecode.
    CodeBlock* baselineCodeBlock = m_jit.graph().baselineCodeBlockFor(node->origin.semantic);
    BytecodeIndex bytecodeIndex = node->origin.semantic.bytecodeIndex();
    BinaryArithProfile* arithProfile = baselineCodeBlock->binaryArithProfileForBytecodeIndex(bytecodeIndex);
    JITSubIC* subIC = m_jit.codeBlock()->addJITSubIC(arithProfile);
    auto repatchingFunction = operationValueSubOptimize;
    auto nonRepatchingFunction = operationValueSub;

    compileMathIC(node, subIC, needsScratchGPRReg, needsScratchFPRReg, repatchingFunction, nonRepatchingFunction);
}
4964
// Emits a binary math inline cache (used by ValueAdd/ValueSub and similar).
// Attempts to generate the IC's inline fast path. If that succeeds, a slow-path
// lambda is registered that silently spills live registers, calls either the
// repatching or non-repatching C++ operation (depending on the IC's generation
// state), refills, and jumps back. If inline generation fails, it simply flushes
// and calls nonRepatchingFunction.
//
// Generator: the snippet generator type the IC drives (e.g. an add/sub generator).
// needsScratchGPRReg / needsScratchFPRReg: whether scratch temporaries must be
// allocated for the generator on this platform.
template <typename Generator, typename RepatchingFunction, typename NonRepatchingFunction>
void SpeculativeJIT::compileMathIC(Node* node, JITBinaryMathIC<Generator>* mathIC, bool needsScratchGPRReg, bool needsScratchFPRReg, RepatchingFunction repatchingFunction, NonRepatchingFunction nonRepatchingFunction)
{
    Edge& leftChild = node->child1();
    Edge& rightChild = node->child2();

    // Operands are emplaced lazily: a constant the Generator can encode is never
    // loaded into registers on the fast path.
    std::optional<JSValueOperand> left;
    std::optional<JSValueOperand> right;

    JSValueRegs leftRegs;
    JSValueRegs rightRegs;

    FPRTemporary leftNumber(this);
    FPRTemporary rightNumber(this);
    FPRReg leftFPR = leftNumber.fpr();
    FPRReg rightFPR = rightNumber.fpr();

    GPRReg scratchGPR = InvalidGPRReg;
    FPRReg scratchFPR = InvalidFPRReg;

    std::optional<FPRTemporary> fprScratch;
    if (needsScratchFPRReg) {
        fprScratch.emplace(this);
        scratchFPR = fprScratch->fpr();
    }

#if USE(JSVALUE64)
    std::optional<GPRTemporary> gprScratch;
    if (needsScratchGPRReg) {
        gprScratch.emplace(this);
        scratchGPR = gprScratch->gpr();
    }
    GPRTemporary result(this);
    JSValueRegs resultRegs = JSValueRegs(result.gpr());
#else
    // On 32-bit, the result tag register doubles as the GPR scratch.
    GPRTemporary resultTag(this);
    GPRTemporary resultPayload(this);
    JSValueRegs resultRegs = JSValueRegs(resultPayload.gpr(), resultTag.gpr());
    if (needsScratchGPRReg)
        scratchGPR = resultRegs.tagGPR();
#endif

    // Seed the snippet operands with the abstract interpreter's result types so the
    // generator can specialize its fast path.
    SnippetOperand leftOperand(m_state.forNode(leftChild).resultType());
    SnippetOperand rightOperand(m_state.forNode(rightChild).resultType());

    // The snippet generator does not support both operands being constant. If the left
    // operand is already const, we'll ignore the right operand's constness.
    if (leftChild->isInt32Constant())
        leftOperand.setConstInt32(leftChild->asInt32());
    else if (rightChild->isInt32Constant())
        rightOperand.setConstInt32(rightChild->asInt32());

    ASSERT(!leftOperand.isConst() || !rightOperand.isConst());
    ASSERT(!(Generator::isLeftOperandValidConstant(leftOperand) && Generator::isRightOperandValidConstant(rightOperand)));

    if (!Generator::isLeftOperandValidConstant(leftOperand)) {
        left.emplace(this, leftChild);
        leftRegs = left->jsValueRegs();
    }
    if (!Generator::isRightOperandValidConstant(rightOperand)) {
        right.emplace(this, rightChild);
        rightRegs = right->jsValueRegs();
    }

#if ENABLE(MATH_IC_STATS)
    auto inlineStart = m_jit.label();
#endif

    Box<MathICGenerationState> addICGenerationState = Box<MathICGenerationState>::create();
    mathIC->m_generator = Generator(leftOperand, rightOperand, resultRegs, leftRegs, rightRegs, leftFPR, rightFPR, scratchGPR, scratchFPR);

    bool shouldEmitProfiling = false;
    bool generatedInline = mathIC->generateInline(m_jit, *addICGenerationState, shouldEmitProfiling);
    if (generatedInline) {
        ASSERT(!addICGenerationState->slowPathJumps.empty());

        // Capture the spill plan now (while the register state is known) for use
        // inside the deferred slow-path lambda below.
        Vector<SilentRegisterSavePlan> savePlans;
        silentSpillAllRegistersImpl(false, savePlans, resultRegs);

        auto done = m_jit.label();

        addSlowPathGeneratorLambda([=, savePlans = WTFMove(savePlans)] () {
            addICGenerationState->slowPathJumps.link(&m_jit);
            addICGenerationState->slowPathStart = m_jit.label();
#if ENABLE(MATH_IC_STATS)
            auto slowPathStart = m_jit.label();
#endif

            silentSpill(savePlans);

            // A constant operand was never loaded into registers; materialize it into
            // the result registers for the call.
            auto innerLeftRegs = leftRegs;
            auto innerRightRegs = rightRegs;
            if (Generator::isLeftOperandValidConstant(leftOperand)) {
                innerLeftRegs = resultRegs;
                m_jit.moveValue(leftChild->asJSValue(), innerLeftRegs);
            } else if (Generator::isRightOperandValidConstant(rightOperand)) {
                innerRightRegs = resultRegs;
                m_jit.moveValue(rightChild->asJSValue(), innerRightRegs);
            }

            // The repatching function lets the IC rewrite itself based on observed types;
            // otherwise call the plain operation.
            if (addICGenerationState->shouldSlowPathRepatch)
                addICGenerationState->slowPathCall = callOperation(repatchingFunction, resultRegs, TrustedImmPtr::weakPointer(m_graph, m_graph.globalObjectFor(node->origin.semantic)), innerLeftRegs, innerRightRegs, TrustedImmPtr(mathIC));
            else
                addICGenerationState->slowPathCall = callOperation(nonRepatchingFunction, resultRegs, TrustedImmPtr::weakPointer(m_graph, m_graph.globalObjectFor(node->origin.semantic)), innerLeftRegs, innerRightRegs);

            silentFill(savePlans);
            m_jit.exceptionCheck();
            m_jit.jump().linkTo(done, &m_jit);

            m_jit.addLinkTask([=] (LinkBuffer& linkBuffer) {
                mathIC->finalizeInlineCode(*addICGenerationState, linkBuffer);
            });

#if ENABLE(MATH_IC_STATS)
            auto slowPathEnd = m_jit.label();
            m_jit.addLinkTask([=] (LinkBuffer& linkBuffer) {
                size_t size = static_cast<char*>(linkBuffer.locationOf(slowPathEnd).executableAddress()) - static_cast<char*>(linkBuffer.locationOf(slowPathStart).executableAddress());
                mathIC->m_generatedCodeSize += size;
            });
#endif

        });
    } else {
        // Inline generation failed: load any constant operand and make the plain call.
        if (Generator::isLeftOperandValidConstant(leftOperand)) {
            left.emplace(this, leftChild);
            leftRegs = left->jsValueRegs();
        } else if (Generator::isRightOperandValidConstant(rightOperand)) {
            right.emplace(this, rightChild);
            rightRegs = right->jsValueRegs();
        }

        flushRegisters();
        callOperation(nonRepatchingFunction, resultRegs, TrustedImmPtr::weakPointer(m_graph, m_graph.globalObjectFor(node->origin.semantic)), leftRegs, rightRegs);
        m_jit.exceptionCheck();
    }

#if ENABLE(MATH_IC_STATS)
    auto inlineEnd = m_jit.label();
    m_jit.addLinkTask([=] (LinkBuffer& linkBuffer) {
        size_t size = static_cast<char*>(linkBuffer.locationOf(inlineEnd).executableAddress()) - static_cast<char*>(linkBuffer.locationOf(inlineStart).executableAddress());
        mathIC->m_generatedCodeSize += size;
    });
#endif

    jsValueResult(resultRegs, node);
    return;
}
5112
// Emits code for InstanceOfCustom (instanceof against a constructor with a custom
// Symbol.hasInstance). There is deliberately no fast path: an unconditional jump
// routes every execution through the operationInstanceOfCustom slow path.
void SpeculativeJIT::compileInstanceOfCustom(Node* node)
{
    // We could do something smarter here but this case is currently super rare and unless
    // Symbol.hasInstance becomes popular will likely remain that way.

    JSValueOperand value(this, node->child1());
    SpeculateCellOperand constructor(this, node->child2());
    JSValueOperand hasInstanceValue(this, node->child3());
    GPRTemporary result(this);

    JSValueRegs valueRegs = value.jsValueRegs();
    GPRReg constructorGPR = constructor.gpr();
    JSValueRegs hasInstanceRegs = hasInstanceValue.jsValueRegs();
    GPRReg resultGPR = result.gpr();

    // Unconditional: always take the slow path (see comment above).
    MacroAssembler::Jump slowCase = m_jit.jump();

    addSlowPathGenerator(slowPathCall(slowCase, this, operationInstanceOfCustom, resultGPR, TrustedImmPtr::weakPointer(m_graph, m_graph.globalObjectFor(node->origin.semantic)), valueRegs, constructorGPR, hasInstanceRegs));

    unblessedBooleanResult(resultGPR, node);
}
5134
// Emits code testing whether a value is a cell whose JSType equals node->queriedType().
// UntypedUse first branches around non-cells (result false); CellUse skips that check
// because the operand is already speculated to be a cell.
void SpeculativeJIT::compileIsCellWithType(Node* node)
{
    switch (node->child1().useKind()) {
    case UntypedUse: {
        JSValueOperand value(this, node->child1());
        GPRTemporary result(this, Reuse, value, PayloadWord);

        JSValueRegs valueRegs = value.jsValueRegs();
        GPRReg resultGPR = result.gpr();

        JITCompiler::Jump isNotCell = m_jit.branchIfNotCell(valueRegs);

        // Compare the cell's type-info byte against the queried JSType.
        m_jit.compare8(JITCompiler::Equal,
            JITCompiler::Address(valueRegs.payloadGPR(), JSCell::typeInfoTypeOffset()),
            TrustedImm32(node->queriedType()),
            resultGPR);
        blessBoolean(resultGPR);
        JITCompiler::Jump done = m_jit.jump();

        // Non-cells can never match: produce false.
        isNotCell.link(&m_jit);
        moveFalseTo(resultGPR);

        done.link(&m_jit);
        blessedBooleanResult(resultGPR, node);
        return;
    }

    case CellUse: {
        SpeculateCellOperand cell(this, node->child1());
        GPRTemporary result(this, Reuse, cell);

        GPRReg cellGPR = cell.gpr();
        GPRReg resultGPR = result.gpr();

        m_jit.compare8(JITCompiler::Equal,
            JITCompiler::Address(cellGPR, JSCell::typeInfoTypeOffset()),
            TrustedImm32(node->queriedType()),
            resultGPR);
        blessBoolean(resultGPR);
        blessedBooleanResult(resultGPR, node);
        return;
    }

    default:
        RELEASE_ASSERT_NOT_REACHED();
        break;
    }
}
5183
// Produces a boolean answering whether the operand is a typed-array view
// (any typed array type, excluding DataView).
void SpeculativeJIT::compileIsTypedArrayView(Node* node)
{
    JSValueOperand value(this, node->child1());
    GPRTemporary result(this, Reuse, value, PayloadWord);

    JSValueRegs valueRegs = value.jsValueRegs();
    GPRReg resultGPR = result.gpr();

    // Non-cells are never typed-array views; they produce false below.
    JITCompiler::Jump isNotCell = m_jit.branchIfNotCell(valueRegs);

    // Range-check the JSType with a single unsigned compare: after subtracting
    // FirstTypedArrayType, "below NumberOfTypedArrayTypesExcludingDataView" is
    // equivalent to FirstTypedArrayType <= type < FirstTypedArrayType + count.
    m_jit.load8(JITCompiler::Address(valueRegs.payloadGPR(), JSCell::typeInfoTypeOffset()), resultGPR);
    m_jit.sub32(TrustedImm32(FirstTypedArrayType), resultGPR);
    m_jit.compare32(JITCompiler::Below,
        resultGPR,
        TrustedImm32(NumberOfTypedArrayTypesExcludingDataView),
        resultGPR);
    blessBoolean(resultGPR);
    JITCompiler::Jump done = m_jit.jump();

    isNotCell.link(&m_jit);
    moveFalseTo(resultGPR);

    done.link(&m_jit);
    blessedBooleanResult(resultGPR, node);
}
5209
// Compiles ToObject and CallObjectConstructor. Fast path: an operand that is
// already an object is returned unchanged; everything else goes to a slow-path
// call of the appropriate operation.
void SpeculativeJIT::compileToObjectOrCallObjectConstructor(Node* node)
{
    RELEASE_ASSERT(node->child1().useKind() == UntypedUse);

    JSValueOperand value(this, node->child1());
    GPRTemporary result(this, Reuse, value, PayloadWord);

    JSValueRegs valueRegs = value.jsValueRegs();
    GPRReg resultGPR = result.gpr();

    // Anything that is not a cell, or is a cell but not an object, takes the slow path.
    MacroAssembler::JumpList slowCases;
    slowCases.append(m_jit.branchIfNotCell(valueRegs));
    slowCases.append(m_jit.branchIfNotObject(valueRegs.payloadGPR()));
    m_jit.move(valueRegs.payloadGPR(), resultGPR);

    if (node->op() == ToObject) {
        // ToObject may carry an identifier for the slow path's error message;
        // UINT32_MAX means "no identifier available".
        UniquedStringImpl* errorMessage = nullptr;
        if (node->identifierNumber() != UINT32_MAX)
            errorMessage = identifierUID(node->identifierNumber());
        addSlowPathGenerator(slowPathCall(slowCases, this, operationToObject, resultGPR, TrustedImmPtr::weakPointer(m_graph, m_graph.globalObjectFor(node->origin.semantic)), valueRegs, errorMessage));
    } else
        addSlowPathGenerator(slowPathCall(slowCases, this, operationCallObjectConstructor, resultGPR, TrustedImmPtr(node->cellOperand()), valueRegs));

    cellResult(resultGPR, node);
}
5235
// Compiles ArithAdd for Int32, Int52 (64-bit only) and double use kinds.
// Checked int adds attach SpeculationRecovery records when the destination
// register aliases an operand, so OSR exit can undo the in-place add and
// recover the original operand value.
void SpeculativeJIT::compileArithAdd(Node* node)
{
    switch (node->binaryUseKind()) {
    case Int32Use: {
        ASSERT(!shouldCheckNegativeZero(node->arithMode()));

        // Constant right operand: add an immediate.
        if (node->child2()->isInt32Constant()) {
            SpeculateInt32Operand op1(this, node->child1());
            GPRTemporary result(this, Reuse, op1);

            GPRReg gpr1 = op1.gpr();
            int32_t imm2 = node->child2()->asInt32();
            GPRReg gprResult = result.gpr();

            if (!shouldCheckOverflow(node->arithMode())) {
                m_jit.add32(Imm32(imm2), gpr1, gprResult);
                strictInt32Result(gprResult, node);
                return;
            }

            MacroAssembler::Jump check = m_jit.branchAdd32(MacroAssembler::Overflow, gpr1, Imm32(imm2), gprResult);
            // If the add was performed in place, register a recovery so OSR exit
            // can subtract the immediate back out of the clobbered operand.
            if (gpr1 == gprResult) {
                speculationCheck(Overflow, JSValueRegs(), nullptr, check,
                    SpeculationRecovery(SpeculativeAddImmediate, gpr1, imm2));
            } else
                speculationCheck(Overflow, JSValueRegs(), nullptr, check);

            strictInt32Result(gprResult, node);
            return;
        }

        SpeculateInt32Operand op1(this, node->child1());
        SpeculateInt32Operand op2(this, node->child2());
        GPRTemporary result(this, Reuse, op1, op2);

        GPRReg gpr1 = op1.gpr();
        GPRReg gpr2 = op2.gpr();
        GPRReg gprResult = result.gpr();

        if (!shouldCheckOverflow(node->arithMode()))
            m_jit.add32(gpr1, gpr2, gprResult);
        else {
            MacroAssembler::Jump check = m_jit.branchAdd32(MacroAssembler::Overflow, gpr1, gpr2, gprResult);

            // As above: if the result register aliases an operand, describe how to
            // reconstruct the clobbered operand on OSR exit.
            if (gpr1 == gprResult && gpr2 == gprResult)
                speculationCheck(Overflow, JSValueRegs(), nullptr, check, SpeculationRecovery(SpeculativeAddSelf, gprResult, gpr2));
            else if (gpr1 == gprResult)
                speculationCheck(Overflow, JSValueRegs(), nullptr, check, SpeculationRecovery(SpeculativeAdd, gprResult, gpr2));
            else if (gpr2 == gprResult)
                speculationCheck(Overflow, JSValueRegs(), nullptr, check, SpeculationRecovery(SpeculativeAdd, gprResult, gpr1));
            else
                speculationCheck(Overflow, JSValueRegs(), nullptr, check);
        }

        strictInt32Result(gprResult, node);
        return;
    }

#if USE(JSVALUE64)
    case Int52RepUse: {
        ASSERT(shouldCheckOverflow(node->arithMode()));
        ASSERT(!shouldCheckNegativeZero(node->arithMode()));

        // Will we need an overflow check? If we can prove that neither input can be
        // Int52 then the overflow check will not be necessary.
        if (!m_state.forNode(node->child1()).couldBeType(SpecNonInt32AsInt52)
            && !m_state.forNode(node->child2()).couldBeType(SpecNonInt32AsInt52)) {
            SpeculateWhicheverInt52Operand op1(this, node->child1());
            SpeculateWhicheverInt52Operand op2(this, node->child2(), op1);
            GPRTemporary result(this, Reuse, op1);
            m_jit.add64(op1.gpr(), op2.gpr(), result.gpr());
            int52Result(result.gpr(), node, op1.format());
            return;
        }

        SpeculateInt52Operand op1(this, node->child1());
        SpeculateInt52Operand op2(this, node->child2());
        GPRTemporary result(this);
        m_jit.move(op1.gpr(), result.gpr());
        speculationCheck(
            Int52Overflow, JSValueRegs(), nullptr,
            m_jit.branchAdd64(MacroAssembler::Overflow, op2.gpr(), result.gpr()));
        int52Result(result.gpr(), node);
        return;
    }
#endif // USE(JSVALUE64)

    case DoubleRepUse: {
        // Double addition never needs overflow or negative-zero checks.
        SpeculateDoubleOperand op1(this, node->child1());
        SpeculateDoubleOperand op2(this, node->child2());
        FPRTemporary result(this, op1, op2);

        FPRReg reg1 = op1.fpr();
        FPRReg reg2 = op2.fpr();
        m_jit.addDouble(reg1, reg2, result.fpr());

        doubleResult(result.fpr(), node);
        return;
    }

    default:
        RELEASE_ASSERT_NOT_REACHED();
        break;
    }
}
5341
// Compiles ArithAbs. Int32 uses a branchless bit trick; doubles use absDouble;
// anything else calls the generic operation (which can throw).
void SpeculativeJIT::compileArithAbs(Node* node)
{
    switch (node->child1().useKind()) {
    case Int32Use: {
        SpeculateStrictInt32Operand op1(this, node->child1());
        GPRTemporary result(this, Reuse, op1);
        GPRTemporary scratch(this);

        // Branchless abs: scratch = x >> 31 (arithmetic shift: all ones if x is
        // negative, else zero); result = (x + scratch) ^ scratch.
        m_jit.move(op1.gpr(), result.gpr());
        m_jit.rshift32(result.gpr(), MacroAssembler::TrustedImm32(31), scratch.gpr());
        m_jit.add32(scratch.gpr(), result.gpr());
        m_jit.xor32(scratch.gpr(), result.gpr());
        // abs(INT32_MIN) does not fit in int32 and comes out negative, so a sign
        // test catches the overflow case.
        if (shouldCheckOverflow(node->arithMode()))
            speculationCheck(Overflow, JSValueRegs(), nullptr, m_jit.branchTest32(MacroAssembler::Signed, result.gpr()));
        strictInt32Result(result.gpr(), node);
        break;
    }

    case DoubleRepUse: {
        SpeculateDoubleOperand op1(this, node->child1());
        FPRTemporary result(this);

        m_jit.absDouble(op1.fpr(), result.fpr());
        doubleResult(result.fpr(), node);
        break;
    }

    default: {
        DFG_ASSERT(m_jit.graph(), node, node->child1().useKind() == UntypedUse, node->child1().useKind());
        JSValueOperand op1(this, node->child1());
        JSValueRegs op1Regs = op1.jsValueRegs();
        flushRegisters();
        FPRResult result(this);
        callOperation(operationArithAbs, result.fpr(), TrustedImmPtr::weakPointer(m_graph, m_graph.globalObjectFor(node->origin.semantic)), op1Regs);
        m_jit.exceptionCheck();
        doubleResult(result.fpr(), node);
        break;
    }
    }
}
5382
5383void SpeculativeJIT::compileArithClz32(Node* node)
5384{
5385 if (node->child1().useKind() == Int32Use || node->child1().useKind() == KnownInt32Use) {
5386 SpeculateInt32Operand value(this, node->child1());
5387 GPRTemporary result(this, Reuse, value);
5388 GPRReg valueReg = value.gpr();
5389 GPRReg resultReg = result.gpr();
5390 m_jit.countLeadingZeros32(valueReg, resultReg);
5391 strictInt32Result(resultReg, node);
5392 return;
5393 }
5394 JSValueOperand op1(this, node->child1());
5395 JSValueRegs op1Regs = op1.jsValueRegs();
5396 GPRTemporary result(this);
5397 GPRReg resultReg = result.gpr();
5398 flushRegisters();
5399 callOperation(operationArithClz32, resultReg, TrustedImmPtr::weakPointer(m_graph, m_graph.globalObjectFor(node->origin.semantic)), op1Regs);
5400 m_jit.exceptionCheck();
5401 strictInt32Result(resultReg, node);
5402}
5403
// Shared helper for unary math nodes (sin, cos, log, ...). For DoubleRepUse it
// calls the raw double->double function directly; otherwise it calls the generic
// operation taking a JSValue, which requires an exception check.
void SpeculativeJIT::compileArithDoubleUnaryOp(Node* node, double (*doubleFunction)(double), double (*operation)(JSGlobalObject*, EncodedJSValue))
{
    if (node->child1().useKind() == DoubleRepUse) {
        SpeculateDoubleOperand op1(this, node->child1());
        FPRReg op1FPR = op1.fpr();

        flushRegisters();

        FPRResult result(this);
        // No exception check is emitted on this path: the raw double function is
        // called without a global object and does not throw.
        callOperation(doubleFunction, result.fpr(), op1FPR);

        doubleResult(result.fpr(), node);
        return;
    }

    JSValueOperand op1(this, node->child1());
    JSValueRegs op1Regs = op1.jsValueRegs();
    flushRegisters();
    FPRResult result(this);
    callOperation(operation, result.fpr(), TrustedImmPtr::weakPointer(m_graph, m_graph.globalObjectFor(node->origin.semantic)), op1Regs);
    m_jit.exceptionCheck();
    doubleResult(result.fpr(), node);
}
5427
// Compiles ArithSub for Int32, Int52 (64-bit only) and double use kinds.
void SpeculativeJIT::compileArithSub(Node* node)
{
    switch (node->binaryUseKind()) {
    case Int32Use: {
        ASSERT(!shouldCheckNegativeZero(node->arithMode()));

        // Constant right operand: subtract an immediate.
        if (node->child2()->isInt32Constant()) {
            SpeculateInt32Operand op1(this, node->child1());
            int32_t imm2 = node->child2()->asInt32();
            GPRTemporary result(this);

            if (!shouldCheckOverflow(node->arithMode())) {
                m_jit.move(op1.gpr(), result.gpr());
                m_jit.sub32(Imm32(imm2), result.gpr());
            } else {
                // This branchSub32 overload needs a scratch register (used on
                // targets where the immediate must be materialized).
                GPRTemporary scratch(this);
                speculationCheck(Overflow, JSValueRegs(), nullptr, m_jit.branchSub32(MacroAssembler::Overflow, op1.gpr(), Imm32(imm2), result.gpr(), scratch.gpr()));
            }

            strictInt32Result(result.gpr(), node);
            return;
        }

        // Constant left operand: materialize it, then subtract the right operand.
        if (node->child1()->isInt32Constant()) {
            int32_t imm1 = node->child1()->asInt32();
            SpeculateInt32Operand op2(this, node->child2());
            GPRTemporary result(this);

            m_jit.move(Imm32(imm1), result.gpr());
            if (!shouldCheckOverflow(node->arithMode()))
                m_jit.sub32(op2.gpr(), result.gpr());
            else
                speculationCheck(Overflow, JSValueRegs(), nullptr, m_jit.branchSub32(MacroAssembler::Overflow, op2.gpr(), result.gpr()));

            strictInt32Result(result.gpr(), node);
            return;
        }

        SpeculateInt32Operand op1(this, node->child1());
        SpeculateInt32Operand op2(this, node->child2());
        GPRTemporary result(this);

        if (!shouldCheckOverflow(node->arithMode())) {
            m_jit.move(op1.gpr(), result.gpr());
            m_jit.sub32(op2.gpr(), result.gpr());
        } else
            speculationCheck(Overflow, JSValueRegs(), nullptr, m_jit.branchSub32(MacroAssembler::Overflow, op1.gpr(), op2.gpr(), result.gpr()));

        strictInt32Result(result.gpr(), node);
        return;
    }

#if USE(JSVALUE64)
    case Int52RepUse: {
        ASSERT(shouldCheckOverflow(node->arithMode()));
        ASSERT(!shouldCheckNegativeZero(node->arithMode()));

        // Will we need an overflow check? If we can prove that neither input can be
        // Int52 then the overflow check will not be necessary.
        if (!m_state.forNode(node->child1()).couldBeType(SpecNonInt32AsInt52)
            && !m_state.forNode(node->child2()).couldBeType(SpecNonInt32AsInt52)) {
            SpeculateWhicheverInt52Operand op1(this, node->child1());
            SpeculateWhicheverInt52Operand op2(this, node->child2(), op1);
            GPRTemporary result(this, Reuse, op1);
            m_jit.move(op1.gpr(), result.gpr());
            m_jit.sub64(op2.gpr(), result.gpr());
            int52Result(result.gpr(), node, op1.format());
            return;
        }

        SpeculateInt52Operand op1(this, node->child1());
        SpeculateInt52Operand op2(this, node->child2());
        GPRTemporary result(this);
        m_jit.move(op1.gpr(), result.gpr());
        speculationCheck(
            Int52Overflow, JSValueRegs(), nullptr,
            m_jit.branchSub64(MacroAssembler::Overflow, op2.gpr(), result.gpr()));
        int52Result(result.gpr(), node);
        return;
    }
#endif // USE(JSVALUE64)

    case DoubleRepUse: {
        // Double subtraction never needs overflow or negative-zero checks.
        SpeculateDoubleOperand op1(this, node->child1());
        SpeculateDoubleOperand op2(this, node->child2());
        FPRTemporary result(this, op1);

        FPRReg reg1 = op1.fpr();
        FPRReg reg2 = op2.fpr();
        m_jit.subDouble(reg1, reg2, result.fpr());

        doubleResult(result.fpr(), node);
        return;
    }

    default:
        RELEASE_ASSERT_NOT_REACHED();
        return;
    }
}
5528
5529void SpeculativeJIT::compileIncOrDec(Node* node)
5530{
5531 // In all other cases the node should have been transformed into an add or a sub by FixupPhase
5532 ASSERT(node->child1().useKind() == UntypedUse);
5533
5534 JSValueOperand op1(this, node->child1());
5535 JSValueRegs op1Regs = op1.jsValueRegs();
5536 flushRegisters();
5537 JSValueRegsFlushedCallResult result(this);
5538 JSValueRegs resultRegs = result.regs();
5539 auto operation = node->op() == Inc ? operationInc : operationDec;
5540 callOperation(operation, resultRegs, TrustedImmPtr::weakPointer(m_graph, m_graph.globalObjectFor(node->origin.semantic)), op1Regs);
5541 m_jit.exceptionCheck();
5542 jsValueResult(resultRegs, node);
5543}
5544
5545void SpeculativeJIT::compileValueNegate(Node* node)
5546{
5547 // FIXME: add a fast path, at least for BigInt32, but probably also for HeapBigInt here.
5548 CodeBlock* baselineCodeBlock = m_jit.graph().baselineCodeBlockFor(node->origin.semantic);
5549 BytecodeIndex bytecodeIndex = node->origin.semantic.bytecodeIndex();
5550 UnaryArithProfile* arithProfile = baselineCodeBlock->unaryArithProfileForBytecodeIndex(bytecodeIndex);
5551 JITNegIC* negIC = m_jit.codeBlock()->addJITNegIC(arithProfile);
5552 auto repatchingFunction = operationArithNegateOptimize;
5553 auto nonRepatchingFunction = operationArithNegate;
5554 bool needsScratchGPRReg = true;
5555 compileMathIC(node, negIC, needsScratchGPRReg, repatchingFunction, nonRepatchingFunction);
5556}
5557
// Compiles ArithNegate for Int32, Int52 (64-bit only) and double use kinds.
void SpeculativeJIT::compileArithNegate(Node* node)
{
    switch (node->child1().useKind()) {
    case Int32Use: {
        SpeculateInt32Operand op1(this, node->child1());
        GPRTemporary result(this);

        m_jit.move(op1.gpr(), result.gpr());

        // Note: there is no notion of being not used as a number, but someone
        // caring about negative zero.

        if (!shouldCheckOverflow(node->arithMode()))
            m_jit.neg32(result.gpr());
        else if (!shouldCheckNegativeZero(node->arithMode()))
            speculationCheck(Overflow, JSValueRegs(), nullptr, m_jit.branchNeg32(MacroAssembler::Overflow, result.gpr()));
        else {
            // Exit if the low 31 bits are all zero, i.e. the value is 0 (whose
            // negation is -0) or INT32_MIN (whose negation overflows int32).
            speculationCheck(Overflow, JSValueRegs(), nullptr, m_jit.branchTest32(MacroAssembler::Zero, result.gpr(), TrustedImm32(0x7fffffff)));
            m_jit.neg32(result.gpr());
        }

        strictInt32Result(result.gpr(), node);
        return;
    }

#if USE(JSVALUE64)
    case Int52RepUse: {
        ASSERT(shouldCheckOverflow(node->arithMode()));

        // If the input is proven to fit in int32, 64-bit negation cannot overflow,
        // so only a negative-zero check may be needed.
        if (!m_state.forNode(node->child1()).couldBeType(SpecNonInt32AsInt52)) {
            SpeculateWhicheverInt52Operand op1(this, node->child1());
            GPRTemporary result(this);
            GPRReg op1GPR = op1.gpr();
            GPRReg resultGPR = result.gpr();
            m_jit.move(op1GPR, resultGPR);
            m_jit.neg64(resultGPR);
            if (shouldCheckNegativeZero(node->arithMode())) {
                // A zero result means the input was zero, i.e. -0 was produced.
                speculationCheck(
                    NegativeZero, JSValueRegs(), nullptr,
                    m_jit.branchTest64(MacroAssembler::Zero, resultGPR));
            }
            int52Result(resultGPR, node, op1.format());
            return;
        }

        SpeculateInt52Operand op1(this, node->child1());
        GPRTemporary result(this);
        GPRReg op1GPR = op1.gpr();
        GPRReg resultGPR = result.gpr();
        m_jit.move(op1GPR, resultGPR);
        speculationCheck(
            Int52Overflow, JSValueRegs(), nullptr,
            m_jit.branchNeg64(MacroAssembler::Overflow, resultGPR));
        if (shouldCheckNegativeZero(node->arithMode())) {
            speculationCheck(
                NegativeZero, JSValueRegs(), nullptr,
                m_jit.branchTest64(MacroAssembler::Zero, resultGPR));
        }
        int52Result(resultGPR, node);
        return;
    }
#endif // USE(JSVALUE64)

    case DoubleRepUse: {
        // Double negation is just a sign flip; no checks are needed.
        SpeculateDoubleOperand op1(this, node->child1());
        FPRTemporary result(this);

        m_jit.negateDouble(op1.fpr(), result.fpr());

        doubleResult(result.fpr(), node);
        return;
    }

    default: {
        RELEASE_ASSERT_NOT_REACHED();
    }
    }
}
5636
// Compiles a unary arith node through a math IC: try to generate an inline fast
// path for the profiled operand types; the fast path's slow-path jumps fall back
// to a call that may repatch the IC. If no inline path can be generated at all,
// emit a plain call to the non-repatching operation.
template <typename Generator, typename RepatchingFunction, typename NonRepatchingFunction>
void SpeculativeJIT::compileMathIC(Node* node, JITUnaryMathIC<Generator>* mathIC, bool needsScratchGPRReg, RepatchingFunction repatchingFunction, NonRepatchingFunction nonRepatchingFunction)
{
    GPRReg scratchGPR = InvalidGPRReg;
    std::optional<GPRTemporary> gprScratch;
    if (needsScratchGPRReg) {
        gprScratch.emplace(this);
        scratchGPR = gprScratch->gpr();
    }
    JSValueOperand childOperand(this, node->child1());
    JSValueRegs childRegs = childOperand.jsValueRegs();
#if USE(JSVALUE64)
    GPRTemporary result(this, Reuse, childOperand);
    JSValueRegs resultRegs(result.gpr());
#else
    GPRTemporary resultTag(this);
    GPRTemporary resultPayload(this);
    JSValueRegs resultRegs(resultPayload.gpr(), resultTag.gpr());
#endif

#if ENABLE(MATH_IC_STATS)
    auto inlineStart = m_jit.label();
#endif

    Box<MathICGenerationState> icGenerationState = Box<MathICGenerationState>::create();
    mathIC->m_generator = Generator(resultRegs, childRegs, scratchGPR);

    bool shouldEmitProfiling = false;
    bool generatedInline = mathIC->generateInline(m_jit, *icGenerationState, shouldEmitProfiling);
    if (generatedInline) {
        ASSERT(!icGenerationState->slowPathJumps.empty());

        // Capture the register-save plan now, while the register allocator's state
        // is current; the slow path (emitted later) spills/fills around its call.
        Vector<SilentRegisterSavePlan> savePlans;
        silentSpillAllRegistersImpl(false, savePlans, resultRegs);

        auto done = m_jit.label();

        addSlowPathGeneratorLambda([=, savePlans = WTFMove(savePlans)] () {
            icGenerationState->slowPathJumps.link(&m_jit);
            icGenerationState->slowPathStart = m_jit.label();
#if ENABLE(MATH_IC_STATS)
            auto slowPathStart = m_jit.label();
#endif

            silentSpill(savePlans);

            // The repatching variant receives the IC itself so it can regenerate
            // the inline code for the observed operand types.
            if (icGenerationState->shouldSlowPathRepatch)
                icGenerationState->slowPathCall = callOperation(repatchingFunction, resultRegs, TrustedImmPtr::weakPointer(m_graph, m_graph.globalObjectFor(node->origin.semantic)), childRegs, TrustedImmPtr(mathIC));
            else
                icGenerationState->slowPathCall = callOperation(nonRepatchingFunction, resultRegs, TrustedImmPtr::weakPointer(m_graph, m_graph.globalObjectFor(node->origin.semantic)), childRegs);

            silentFill(savePlans);
            m_jit.exceptionCheck();
            m_jit.jump().linkTo(done, &m_jit);

            // Record the generated ranges into the IC once the code is linked.
            m_jit.addLinkTask([=] (LinkBuffer& linkBuffer) {
                mathIC->finalizeInlineCode(*icGenerationState, linkBuffer);
            });

#if ENABLE(MATH_IC_STATS)
            auto slowPathEnd = m_jit.label();
            m_jit.addLinkTask([=] (LinkBuffer& linkBuffer) {
                size_t size = static_cast<char*>(linkBuffer.locationOf(slowPathEnd).executableAddress()) - static_cast<char*>(linkBuffer.locationOf(slowPathStart).executableAddress());
                mathIC->m_generatedCodeSize += size;
            });
#endif

        });
    } else {
        // No inline path was generated; do the generic call unconditionally.
        flushRegisters();
        callOperation(nonRepatchingFunction, resultRegs, TrustedImmPtr::weakPointer(m_graph, m_graph.globalObjectFor(node->origin.semantic)), childRegs);
        m_jit.exceptionCheck();
    }

#if ENABLE(MATH_IC_STATS)
    auto inlineEnd = m_jit.label();
    m_jit.addLinkTask([=] (LinkBuffer& linkBuffer) {
        size_t size = static_cast<char*>(linkBuffer.locationOf(inlineEnd).executableAddress()) - static_cast<char*>(linkBuffer.locationOf(inlineStart).executableAddress());
        mathIC->m_generatedCodeSize += size;
    });
#endif

    jsValueResult(resultRegs, node);
    return;
}
5722
// Compiles ValueMul. Fast paths for BigInt32 (inline) and HeapBigInt (direct
// call); proven-non-number or AnyBigInt operands call the generic operation;
// everything else goes through a mul math IC.
void SpeculativeJIT::compileValueMul(Node* node)
{
    Edge& leftChild = node->child1();
    Edge& rightChild = node->child2();

#if USE(BIGINT32)
    // FIXME: Introduce another BigInt32 code generation: binary use kinds are BigInt32Use, but result is SpecAnyInt and accepting overflow.
    // Let's distinguish these modes based on result type information by introducing NodeResultBigInt32.
    // https://bugs.webkit.org/show_bug.cgi?id=210957
    // https://bugs.webkit.org/show_bug.cgi?id=211040
    if (node->binaryUseKind() == BigInt32Use) {
        // FIXME: the code between compileValueAdd, compileValueSub and compileValueMul for BigInt32 is nearly identical, so try to get rid of the duplication.
        SpeculateBigInt32Operand left(this, node->child1());
        SpeculateBigInt32Operand right(this, node->child2());
        GPRTemporary result(this);
        GPRTemporary temp(this);

        GPRReg leftGPR = left.gpr();
        GPRReg rightGPR = right.gpr();
        GPRReg resultGPR = result.gpr();
        GPRReg tempGPR = temp.gpr();

        // Multiply the unboxed 32-bit payloads; on overflow, OSR exit via the
        // speculation check rather than producing a heap BigInt here.
        m_jit.unboxBigInt32(leftGPR, resultGPR);
        m_jit.unboxBigInt32(rightGPR, tempGPR);

        MacroAssembler::Jump check = m_jit.branchMul32(MacroAssembler::Overflow, resultGPR, tempGPR, resultGPR);

        speculationCheck(BigInt32Overflow, JSValueRegs(), nullptr, check);

        m_jit.boxBigInt32(resultGPR);
        jsValueResult(resultGPR, node);
        return;
    }
    // FIXME: add support for mixed BigInt32/HeapBigInt
#endif

    if (leftChild.useKind() == HeapBigIntUse && rightChild.useKind() == HeapBigIntUse) {
        // Both operands are speculated heap BigInts: call the BigInt multiply directly.
        SpeculateCellOperand left(this, leftChild);
        SpeculateCellOperand right(this, rightChild);
        GPRReg leftGPR = left.gpr();
        GPRReg rightGPR = right.gpr();

        speculateHeapBigInt(leftChild, leftGPR);
        speculateHeapBigInt(rightChild, rightGPR);

        flushRegisters();
        JSValueRegsFlushedCallResult result(this);
        JSValueRegs resultRegs = result.regs();

        callOperation(operationMulHeapBigInt, resultRegs, TrustedImmPtr::weakPointer(m_graph, m_graph.globalObjectFor(node->origin.semantic)), leftGPR, rightGPR);

        m_jit.exceptionCheck();
        jsValueResult(resultRegs, node);
        return;
    }

    if (isKnownNotNumber(leftChild.node()) || isKnownNotNumber(rightChild.node()) || node->isBinaryUseKind(AnyBigIntUse)) {
        // A numeric fast path cannot help here; go straight to the generic operation.
        JSValueOperand left(this, leftChild, ManualOperandSpeculation);
        JSValueOperand right(this, rightChild, ManualOperandSpeculation);
        speculate(node, leftChild);
        speculate(node, rightChild);
        JSValueRegs leftRegs = left.jsValueRegs();
        JSValueRegs rightRegs = right.jsValueRegs();

        flushRegisters();
        JSValueRegsFlushedCallResult result(this);
        JSValueRegs resultRegs = result.regs();
        callOperation(operationValueMul, resultRegs, TrustedImmPtr::weakPointer(m_graph, m_graph.globalObjectFor(node->origin.semantic)), leftRegs, rightRegs);
        m_jit.exceptionCheck();

        jsValueResult(resultRegs, node);
        return;
    }

    // Otherwise compile through a mul math IC, seeded with the baseline arith profile.
    bool needsScratchGPRReg = true;
#if USE(JSVALUE64)
    bool needsScratchFPRReg = false;
#else
    bool needsScratchFPRReg = true;
#endif

    CodeBlock* baselineCodeBlock = m_jit.graph().baselineCodeBlockFor(node->origin.semantic);
    BytecodeIndex bytecodeIndex = node->origin.semantic.bytecodeIndex();
    BinaryArithProfile* arithProfile = baselineCodeBlock->binaryArithProfileForBytecodeIndex(bytecodeIndex);
    JITMulIC* mulIC = m_jit.codeBlock()->addJITMulIC(arithProfile);
    auto repatchingFunction = operationValueMulOptimize;
    auto nonRepatchingFunction = operationValueMul;

    compileMathIC(node, mulIC, needsScratchGPRReg, needsScratchFPRReg, repatchingFunction, nonRepatchingFunction);
}
5813
// Compiles ArithMul for Int32, Int52 (64-bit only) and double use kinds.
// Comment fix: the Int52 explanation below said "16 bits" in two places, which
// contradicted the "(a * (b << 12))" formula in the same comment; the Int52
// shift amount is 12 (64 - 52 = 12).
void SpeculativeJIT::compileArithMul(Node* node)
{
    switch (node->binaryUseKind()) {
    case Int32Use: {
        if (node->child2()->isInt32Constant()) {
            SpeculateInt32Operand op1(this, node->child1());
            GPRTemporary result(this);

            int32_t imm = node->child2()->asInt32();
            GPRReg op1GPR = op1.gpr();
            GPRReg resultGPR = result.gpr();

            if (!shouldCheckOverflow(node->arithMode()))
                m_jit.mul32(Imm32(imm), op1GPR, resultGPR);
            else {
                speculationCheck(Overflow, JSValueRegs(), nullptr,
                    m_jit.branchMul32(MacroAssembler::Overflow, op1GPR, Imm32(imm), resultGPR));
            }

            // The only way to create negative zero with a constant is:
            // -negative-op1 * 0.
            // -zero-op1 * negative constant.
            if (shouldCheckNegativeZero(node->arithMode())) {
                if (!imm)
                    speculationCheck(NegativeZero, JSValueRegs(), nullptr, m_jit.branchTest32(MacroAssembler::Signed, op1GPR));
                else if (imm < 0) {
                    if (shouldCheckOverflow(node->arithMode()))
                        speculationCheck(NegativeZero, JSValueRegs(), nullptr, m_jit.branchTest32(MacroAssembler::Zero, resultGPR));
                    else
                        speculationCheck(NegativeZero, JSValueRegs(), nullptr, m_jit.branchTest32(MacroAssembler::Zero, op1GPR));
                }
            }

            strictInt32Result(resultGPR, node);
            return;
        }
        SpeculateInt32Operand op1(this, node->child1());
        SpeculateInt32Operand op2(this, node->child2());
        GPRTemporary result(this);

        GPRReg reg1 = op1.gpr();
        GPRReg reg2 = op2.gpr();

        // We can perform truncated multiplications if we get to this point, because if the
        // fixup phase could not prove that it would be safe, it would have turned us into
        // a double multiplication.
        if (!shouldCheckOverflow(node->arithMode()))
            m_jit.mul32(reg1, reg2, result.gpr());
        else {
            speculationCheck(
                Overflow, JSValueRegs(), nullptr,
                m_jit.branchMul32(MacroAssembler::Overflow, reg1, reg2, result.gpr()));
        }

        // Check for negative zero, if the users of this node care about such things.
        // A zero product is -0 only when either operand was negative.
        if (shouldCheckNegativeZero(node->arithMode())) {
            MacroAssembler::Jump resultNonZero = m_jit.branchTest32(MacroAssembler::NonZero, result.gpr());
            speculationCheck(NegativeZero, JSValueRegs(), nullptr, m_jit.branchTest32(MacroAssembler::Signed, reg1));
            speculationCheck(NegativeZero, JSValueRegs(), nullptr, m_jit.branchTest32(MacroAssembler::Signed, reg2));
            resultNonZero.link(&m_jit);
        }

        strictInt32Result(result.gpr(), node);
        return;
    }

#if USE(JSVALUE64)
    case Int52RepUse: {
        ASSERT(shouldCheckOverflow(node->arithMode()));

        // This is super clever. We want to do an int52 multiplication and check the
        // int52 overflow bit. There is no direct hardware support for this, but we do
        // have the ability to do an int64 multiplication and check the int64 overflow
        // bit. We leverage that. Consider that a, b are int52 numbers inside int64
        // registers, with the high 12 bits being sign-extended. We can do:
        //
        //     (a * (b << 12))
        //
        // This will give us a left-shifted int52 (value is in high 52 bits, low 12
        // bits are zero) plus the int52 overflow bit. I.e. whether this 64-bit
        // multiplication overflows is identical to whether the 'a * b' 52-bit
        // multiplication overflows.
        //
        // In our nomenclature, this is:
        //
        //     strictInt52(a) * int52(b) => int52
        //
        // That is "strictInt52" means unshifted and "int52" means left-shifted by 12
        // bits.
        //
        // We don't care which of op1 or op2 serves as the left-shifted operand, so
        // we just do whatever is more convenient for op1 and have op2 do the
        // opposite. This ensures that we do at most one shift.

        SpeculateWhicheverInt52Operand op1(this, node->child1());
        SpeculateWhicheverInt52Operand op2(this, node->child2(), OppositeShift, op1);
        GPRTemporary result(this);

        GPRReg op1GPR = op1.gpr();
        GPRReg op2GPR = op2.gpr();
        GPRReg resultGPR = result.gpr();

        m_jit.move(op1GPR, resultGPR);
        speculationCheck(
            Int52Overflow, JSValueRegs(), nullptr,
            m_jit.branchMul64(MacroAssembler::Overflow, op2GPR, resultGPR));

        if (shouldCheckNegativeZero(node->arithMode())) {
            MacroAssembler::Jump resultNonZero = m_jit.branchTest64(
                MacroAssembler::NonZero, resultGPR);
            speculationCheck(
                NegativeZero, JSValueRegs(), nullptr,
                m_jit.branch64(MacroAssembler::LessThan, op1GPR, TrustedImm32(0)));
            speculationCheck(
                NegativeZero, JSValueRegs(), nullptr,
                m_jit.branch64(MacroAssembler::LessThan, op2GPR, TrustedImm32(0)));
            resultNonZero.link(&m_jit);
        }

        int52Result(resultGPR, node);
        return;
    }
#endif // USE(JSVALUE64)

    case DoubleRepUse: {
        // Double multiplication never needs overflow or negative-zero checks.
        SpeculateDoubleOperand op1(this, node->child1());
        SpeculateDoubleOperand op2(this, node->child2());
        FPRTemporary result(this, op1, op2);

        FPRReg reg1 = op1.fpr();
        FPRReg reg2 = op2.fpr();

        m_jit.mulDouble(reg1, reg2, result.fpr());

        doubleResult(result.fpr(), node);
        return;
    }

    default:
        RELEASE_ASSERT_NOT_REACHED();
        return;
    }
}
5957
5958void SpeculativeJIT::compileValueDiv(Node* node)
5959{
5960 Edge& leftChild = node->child1();
5961 Edge& rightChild = node->child2();
5962
5963 // FIXME: add a fast path for BigInt32. Currently we go through the slow path, because of how ugly the code for Div gets.
5964 // https://bugs.webkit.org/show_bug.cgi?id=211041
5965
5966 if (node->isBinaryUseKind(HeapBigIntUse)) {
5967 SpeculateCellOperand left(this, leftChild);
5968 SpeculateCellOperand right(this, rightChild);
5969 GPRReg leftGPR = left.gpr();
5970 GPRReg rightGPR = right.gpr();
5971
5972 speculateHeapBigInt(leftChild, leftGPR);
5973 speculateHeapBigInt(rightChild, rightGPR);
5974
5975 flushRegisters();
5976 JSValueRegsFlushedCallResult result(this);
5977 JSValueRegs resultRegs = result.regs();
5978
5979 callOperation(operationDivHeapBigInt, resultRegs, TrustedImmPtr::weakPointer(m_graph, m_graph.globalObjectFor(node->origin.semantic)), leftGPR, rightGPR);
5980
5981 m_jit.exceptionCheck();
5982 jsValueResult(resultRegs, node);
5983 return;
5984 }
5985
5986 if (isKnownNotNumber(leftChild.node()) || isKnownNotNumber(rightChild.node()) || node->isBinaryUseKind(AnyBigIntUse) || node->isBinaryUseKind(BigInt32Use)) {
5987 JSValueOperand left(this, leftChild, ManualOperandSpeculation);
5988 JSValueOperand right(this, rightChild, ManualOperandSpeculation);
5989 speculate(node, leftChild);
5990 speculate(node, rightChild);
5991 JSValueRegs leftRegs = left.jsValueRegs();
5992 JSValueRegs rightRegs = right.jsValueRegs();
5993
5994 flushRegisters();
5995 JSValueRegsFlushedCallResult result(this);
5996 JSValueRegs resultRegs = result.regs();
5997 callOperation(operationValueDiv, resultRegs, TrustedImmPtr::weakPointer(m_graph, m_graph.globalObjectFor(node->origin.semantic)), leftRegs, rightRegs);
5998 m_jit.exceptionCheck();
5999
6000 jsValueResult(resultRegs, node);
6001 return;
6002 }
6003
6004 ASSERT(node->isBinaryUseKind(UntypedUse));
6005
6006 std::optional<JSValueOperand> left;
6007 std::optional<JSValueOperand> right;
6008
6009 JSValueRegs leftRegs;
6010 JSValueRegs rightRegs;
6011
6012 FPRTemporary leftNumber(this);
6013 FPRTemporary rightNumber(this);
6014 FPRReg leftFPR = leftNumber.fpr();
6015 FPRReg rightFPR = rightNumber.fpr();
6016 FPRTemporary fprScratch(this);
6017 FPRReg scratchFPR = fprScratch.fpr();
6018
6019#if USE(JSVALUE64)
6020 GPRTemporary result(this);
6021 JSValueRegs resultRegs = JSValueRegs(result.gpr());
6022 GPRTemporary scratch(this);
6023 GPRReg scratchGPR = scratch.gpr();
6024#else
6025 GPRTemporary resultTag(this);
6026 GPRTemporary resultPayload(this);
6027 JSValueRegs resultRegs = JSValueRegs(resultPayload.gpr(), resultTag.gpr());
6028 GPRReg scratchGPR = resultTag.gpr();
6029#endif
6030
6031 SnippetOperand leftOperand(m_state.forNode(leftChild).resultType());
6032 SnippetOperand rightOperand(m_state.forNode(rightChild).resultType());
6033
6034 if (leftChild->isInt32Constant())
6035 leftOperand.setConstInt32(leftChild->asInt32());
6036#if USE(JSVALUE64)
6037 else if (leftChild->isDoubleConstant())
6038 leftOperand.setConstDouble(leftChild->asNumber());
6039#endif
6040
6041 if (leftOperand.isConst()) {
6042 // The snippet generator only supports 1 argument as a constant.
6043 // Ignore the rightChild's const-ness.
6044 } else if (rightChild->isInt32Constant())
6045 rightOperand.setConstInt32(rightChild->asInt32());
6046#if USE(JSVALUE64)
6047 else if (rightChild->isDoubleConstant())
6048 rightOperand.setConstDouble(rightChild->asNumber());
6049#endif
6050
6051 RELEASE_ASSERT(!leftOperand.isConst() || !rightOperand.isConst());
6052
6053 if (!leftOperand.isConst()) {
6054 left.emplace(this, leftChild);
6055 leftRegs = left->jsValueRegs();
6056 }
6057 if (!rightOperand.isConst()) {
6058 right.emplace(this, rightChild);
6059 rightRegs = right->jsValueRegs();
6060 }
6061
6062 JITDivGenerator gen(leftOperand, rightOperand, resultRegs, leftRegs, rightRegs,
6063 leftFPR, rightFPR, scratchGPR, scratchFPR);
6064 gen.generateFastPath(m_jit);
6065
6066 ASSERT(gen.didEmitFastPath());
6067 gen.endJumpList().append(m_jit.jump());
6068
6069 gen.slowPathJumpList().link(&m_jit);
6070 silentSpillAllRegisters(resultRegs);
6071
6072 if (leftOperand.isConst()) {
6073 leftRegs = resultRegs;
6074 m_jit.moveValue(leftChild->asJSValue(), leftRegs);
6075 }
6076 if (rightOperand.isConst()) {
6077 rightRegs = resultRegs;
6078 m_jit.moveValue(rightChild->asJSValue(), rightRegs);
6079 }
6080
6081 callOperation(operationValueDiv, resultRegs, TrustedImmPtr::weakPointer(m_graph, m_graph.globalObjectFor(node->origin.semantic)), leftRegs, rightRegs);
6082
6083 silentFillAllRegisters();
6084 m_jit.exceptionCheck();
6085
6086 gen.endJumpList().link(&m_jit);
6087 jsValueResult(resultRegs, node);
6088}
6089
// Compiles ArithDiv. Int32Use emits an inline integer division with OSR exits
// for the cases an Int32 result cannot represent; DoubleRepUse emits a plain
// double divide.
void SpeculativeJIT::compileArithDiv(Node* node)
{
    switch (node->binaryUseKind()) {
    case Int32Use: {
#if CPU(X86_64)
        // x86 idiv implicitly takes its dividend in edx:eax and writes the
        // quotient to eax and the remainder to edx, so pin temporaries to
        // those registers up front.
        SpeculateInt32Operand op1(this, node->child1());
        SpeculateInt32Operand op2(this, node->child2());
        GPRTemporary eax(this, X86Registers::eax);
        GPRTemporary edx(this, X86Registers::edx);
        GPRReg op1GPR = op1.gpr();
        GPRReg op2GPR = op2.gpr();

        // If the divisor lives in eax or edx it would be clobbered by the
        // division itself, so arrange a scratch register to copy it into.
        GPRReg op2TempGPR;
        GPRReg temp;
        if (op2GPR == X86Registers::eax || op2GPR == X86Registers::edx) {
            op2TempGPR = allocate();
            temp = op2TempGPR;
        } else {
            op2TempGPR = InvalidGPRReg;
            if (op1GPR == X86Registers::eax)
                temp = X86Registers::edx;
            else
                temp = X86Registers::eax;
        }

        ASSERT(temp != op1GPR);
        ASSERT(temp != op2GPR);

        // Detect the two dangerous divisors (0 and -1) with one unsigned
        // compare: op2 + 1 is <= 1 (unsigned) exactly when op2 is 0 or -1.
        m_jit.add32(JITCompiler::TrustedImm32(1), op2GPR, temp);

        JITCompiler::Jump safeDenominator = m_jit.branch32(JITCompiler::Above, temp, JITCompiler::TrustedImm32(1));

        JITCompiler::JumpList done;
        if (shouldCheckOverflow(node->arithMode())) {
            // Checked mode: division by zero, and INT_MIN / -1 (which idiv
            // cannot represent in 32 bits), must OSR exit.
            speculationCheck(Overflow, JSValueRegs(), nullptr, m_jit.branchTest32(JITCompiler::Zero, op2GPR));
            speculationCheck(Overflow, JSValueRegs(), nullptr, m_jit.branch32(JITCompiler::Equal, op1GPR, TrustedImm32(-2147483647-1)));
        } else {
            // This is the case where we convert the result to an int after we're done, and we
            // already know that the denominator is either -1 or 0. So, if the denominator is
            // zero, then the result should be zero. If the denominator is not zero (i.e. it's
            // -1) and the numerator is -2^31 then the result should be -2^31. Otherwise we
            // are happy to fall through to a normal division, since we're just dividing
            // something by negative 1.

            JITCompiler::Jump notZero = m_jit.branchTest32(JITCompiler::NonZero, op2GPR);
            m_jit.move(TrustedImm32(0), eax.gpr());
            done.append(m_jit.jump());

            notZero.link(&m_jit);
            JITCompiler::Jump notNeg2ToThe31 =
                m_jit.branch32(JITCompiler::NotEqual, op1GPR, TrustedImm32(-2147483647-1));
            m_jit.zeroExtend32ToWord(op1GPR, eax.gpr());
            done.append(m_jit.jump());

            notNeg2ToThe31.link(&m_jit);
        }

        safeDenominator.link(&m_jit);

        // If the user cares about negative zero, then speculate that we're not about
        // to produce negative zero. (0 / negative yields -0, which Int32 cannot
        // represent.)
        if (shouldCheckNegativeZero(node->arithMode())) {
            MacroAssembler::Jump numeratorNonZero = m_jit.branchTest32(MacroAssembler::NonZero, op1GPR);
            speculationCheck(NegativeZero, JSValueRegs(), nullptr, m_jit.branch32(MacroAssembler::LessThan, op2GPR, TrustedImm32(0)));
            numeratorNonZero.link(&m_jit);
        }

        if (op2TempGPR != InvalidGPRReg) {
            m_jit.move(op2GPR, op2TempGPR);
            op2GPR = op2TempGPR;
        }

        m_jit.move(op1GPR, eax.gpr());
        // Sign-extend eax into edx:eax (cdq), then divide.
        m_jit.x86ConvertToDoubleWord32();
        m_jit.x86Div32(op2GPR);

        if (op2TempGPR != InvalidGPRReg)
            unlock(op2TempGPR);

        // Check that there was no remainder. If there had been, then we'd be obligated to
        // produce a double result instead.
        if (shouldCheckOverflow(node->arithMode()))
            speculationCheck(Overflow, JSValueRegs(), nullptr, m_jit.branchTest32(JITCompiler::NonZero, edx.gpr()));

        done.link(&m_jit);
        strictInt32Result(eax.gpr(), node);
#elif HAVE(ARM_IDIV_INSTRUCTIONS) || CPU(ARM64)
        SpeculateInt32Operand op1(this, node->child1());
        SpeculateInt32Operand op2(this, node->child2());
        GPRReg op1GPR = op1.gpr();
        GPRReg op2GPR = op2.gpr();
        GPRTemporary quotient(this);
        GPRTemporary multiplyAnswer(this);

        // If the user cares about negative zero, then speculate that we're not about
        // to produce negative zero.
        if (shouldCheckNegativeZero(node->arithMode())) {
            MacroAssembler::Jump numeratorNonZero = m_jit.branchTest32(MacroAssembler::NonZero, op1GPR);
            speculationCheck(NegativeZero, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::LessThan, op2GPR, TrustedImm32(0)));
            numeratorNonZero.link(&m_jit);
        }

        // Checked mode: division by zero must OSR exit (sdiv itself would
        // silently produce 0).
        if (shouldCheckOverflow(node->arithMode()))
            speculationCheck(Overflow, JSValueRegs(), nullptr, m_jit.branchTest32(MacroAssembler::Zero, op2GPR));

        m_jit.assembler().sdiv<32>(quotient.gpr(), op1GPR, op2GPR);

        // Check that there was no remainder. If there had been, then we'd be obligated to
        // produce a double result instead. The division was exact iff
        // quotient * divisor == dividend (and the multiply didn't overflow).
        if (shouldCheckOverflow(node->arithMode())) {
            speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchMul32(JITCompiler::Overflow, quotient.gpr(), op2GPR, multiplyAnswer.gpr()));
            speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branch32(JITCompiler::NotEqual, multiplyAnswer.gpr(), op1GPR));
        }

        strictInt32Result(quotient.gpr(), node);
#else
        RELEASE_ASSERT_NOT_REACHED();
#endif
        break;
    }

    case DoubleRepUse: {
        // Double division maps directly onto the hardware divide instruction.
        SpeculateDoubleOperand op1(this, node->child1());
        SpeculateDoubleOperand op2(this, node->child2());
        FPRTemporary result(this, op1);

        FPRReg reg1 = op1.fpr();
        FPRReg reg2 = op2.fpr();
        m_jit.divDouble(reg1, reg2, result.fpr());

        doubleResult(result.fpr(), node);
        break;
    }

    default:
        RELEASE_ASSERT_NOT_REACHED();
        break;
    }
}
6229
6230void SpeculativeJIT::compileArithFRound(Node* node)
6231{
6232 if (node->child1().useKind() == DoubleRepUse) {
6233 SpeculateDoubleOperand op1(this, node->child1());
6234 FPRTemporary result(this, op1);
6235 m_jit.convertDoubleToFloat(op1.fpr(), result.fpr());
6236 m_jit.convertFloatToDouble(result.fpr(), result.fpr());
6237 doubleResult(result.fpr(), node);
6238 return;
6239 }
6240
6241 JSValueOperand op1(this, node->child1());
6242 JSValueRegs op1Regs = op1.jsValueRegs();
6243 flushRegisters();
6244 FPRResult result(this);
6245 callOperation(operationArithFRound, result.fpr(), TrustedImmPtr::weakPointer(m_graph, m_graph.globalObjectFor(node->origin.semantic)), op1Regs);
6246 m_jit.exceptionCheck();
6247 doubleResult(result.fpr(), node);
6248}
6249
6250void SpeculativeJIT::compileValueMod(Node* node)
6251{
6252 Edge& leftChild = node->child1();
6253 Edge& rightChild = node->child2();
6254
6255 // FIXME: add a fast path for BigInt32. Currently we go through the slow path, because of how ugly the code for Mod gets.
6256
6257 if (node->binaryUseKind() == HeapBigIntUse) {
6258 SpeculateCellOperand left(this, leftChild);
6259 SpeculateCellOperand right(this, rightChild);
6260 GPRReg leftGPR = left.gpr();
6261 GPRReg rightGPR = right.gpr();
6262
6263 speculateHeapBigInt(leftChild, leftGPR);
6264 speculateHeapBigInt(rightChild, rightGPR);
6265
6266 flushRegisters();
6267 JSValueRegsFlushedCallResult result(this);
6268 JSValueRegs resultRegs = result.regs();
6269
6270 callOperation(operationModHeapBigInt, resultRegs, TrustedImmPtr::weakPointer(m_graph, m_graph.globalObjectFor(node->origin.semantic)), leftGPR, rightGPR);
6271
6272 m_jit.exceptionCheck();
6273 jsValueResult(resultRegs, node);
6274 return;
6275 }
6276
6277 DFG_ASSERT(m_jit.graph(), node, node->binaryUseKind() == UntypedUse || node->binaryUseKind() == AnyBigIntUse || node->binaryUseKind() == BigInt32Use, node->binaryUseKind());
6278 JSValueOperand op1(this, leftChild, ManualOperandSpeculation);
6279 JSValueOperand op2(this, rightChild, ManualOperandSpeculation);
6280 speculate(node, leftChild);
6281 speculate(node, rightChild);
6282 JSValueRegs op1Regs = op1.jsValueRegs();
6283 JSValueRegs op2Regs = op2.jsValueRegs();
6284 flushRegisters();
6285 JSValueRegsFlushedCallResult result(this);
6286 JSValueRegs resultRegs = result.regs();
6287 callOperation(operationValueMod, resultRegs, TrustedImmPtr::weakPointer(m_graph, m_graph.globalObjectFor(node->origin.semantic)), op1Regs, op2Regs);
6288 m_jit.exceptionCheck();
6289 jsValueResult(resultRegs, node);
6290}
6291
// Compiles ArithMod. Int32Use has three tiers: a shift/mask trick for known
// power-of-two divisors, a direct idiv for other safe constant divisors
// (x86 only), and a fully general inline division. DoubleRepUse calls fmod.
void SpeculativeJIT::compileArithMod(Node* node)
{
    switch (node->binaryUseKind()) {
    case Int32Use: {
        // In the fast path, the dividend value could be the final result
        // (in case of |dividend| < |divisor|), so we speculate it as strict int32.
        SpeculateStrictInt32Operand op1(this, node->child1());

        if (node->child2()->isInt32Constant()) {
            int32_t divisor = node->child2()->asInt32();
            if (divisor > 1 && hasOneBitSet(divisor)) {
                // Known positive power-of-two divisor: compute the remainder
                // branchlessly with shifts and a mask, no division needed.
                unsigned logarithm = WTF::fastLog2(static_cast<uint32_t>(divisor));
                GPRReg dividendGPR = op1.gpr();
                GPRTemporary result(this);
                GPRReg resultGPR = result.gpr();

                // This is what LLVM generates. It's pretty crazy. Here's my
                // attempt at understanding it.

                // First, compute either divisor - 1, or 0, depending on whether
                // the dividend is negative:
                //
                // If dividend < 0:  resultGPR = divisor - 1
                // If dividend >= 0: resultGPR = 0
                m_jit.move(dividendGPR, resultGPR);
                m_jit.rshift32(TrustedImm32(31), resultGPR);
                m_jit.urshift32(TrustedImm32(32 - logarithm), resultGPR);

                // Add in the dividend, so that:
                //
                // If dividend < 0:  resultGPR = dividend + divisor - 1
                // If dividend >= 0: resultGPR = dividend
                m_jit.add32(dividendGPR, resultGPR);

                // Mask so as to only get the *high* bits. This rounds down
                // (towards negative infinity) resultGPR to the nearest multiple
                // of divisor, so that:
                //
                // If dividend < 0:  resultGPR = floor((dividend + divisor - 1) / divisor)
                // If dividend >= 0: resultGPR = floor(dividend / divisor)
                //
                // Note that this can be simplified to:
                //
                // If dividend < 0:  resultGPR = ceil(dividend / divisor)
                // If dividend >= 0: resultGPR = floor(dividend / divisor)
                //
                // Note that if the dividend is negative, resultGPR will also be negative.
                // Regardless of the sign of dividend, resultGPR will be rounded towards
                // zero, because of how things are conditionalized.
                m_jit.and32(TrustedImm32(-divisor), resultGPR);

                // Subtract resultGPR from dividendGPR, which yields the remainder:
                //
                // resultGPR = dividendGPR - resultGPR
                m_jit.neg32(resultGPR);
                m_jit.add32(dividendGPR, resultGPR);

                if (shouldCheckNegativeZero(node->arithMode())) {
                    // Check that we're not about to create negative zero:
                    // a negative numerator with a zero remainder means the
                    // true result is -0, which Int32 cannot represent.
                    JITCompiler::Jump numeratorPositive = m_jit.branch32(JITCompiler::GreaterThanOrEqual, dividendGPR, TrustedImm32(0));
                    speculationCheck(NegativeZero, JSValueRegs(), nullptr, m_jit.branchTest32(JITCompiler::Zero, resultGPR));
                    numeratorPositive.link(&m_jit);
                }

                strictInt32Result(resultGPR, node);
                return;
            }
        }

#if CPU(X86_64)
        if (node->child2()->isInt32Constant()) {
            int32_t divisor = node->child2()->asInt32();
            if (divisor && divisor != -1) {
                // Constant divisor that cannot make idiv fault (non-zero and
                // not -1): divide directly, skipping the dynamic checks.
                GPRReg op1Gpr = op1.gpr();

                GPRTemporary eax(this, X86Registers::eax);
                GPRTemporary edx(this, X86Registers::edx);
                GPRTemporary scratch(this);
                GPRReg scratchGPR = scratch.gpr();

                // idiv clobbers eax/edx; preserve the dividend if it lives
                // there, because the negative-zero check still needs it.
                GPRReg op1SaveGPR;
                if (op1Gpr == X86Registers::eax || op1Gpr == X86Registers::edx) {
                    op1SaveGPR = allocate();
                    ASSERT(op1Gpr != op1SaveGPR);
                    m_jit.move(op1Gpr, op1SaveGPR);
                } else
                    op1SaveGPR = op1Gpr;
                ASSERT(op1SaveGPR != X86Registers::eax);
                ASSERT(op1SaveGPR != X86Registers::edx);

                m_jit.move(op1Gpr, eax.gpr());
                m_jit.move(TrustedImm32(divisor), scratchGPR);
                // Sign-extend eax into edx:eax (cdq), then divide; the
                // remainder lands in edx.
                m_jit.x86ConvertToDoubleWord32();
                m_jit.x86Div32(scratchGPR);
                if (shouldCheckNegativeZero(node->arithMode())) {
                    // Negative numerator with zero remainder would be -0.
                    JITCompiler::Jump numeratorPositive = m_jit.branch32(JITCompiler::GreaterThanOrEqual, op1SaveGPR, TrustedImm32(0));
                    speculationCheck(Overflow, JSValueRegs(), nullptr, m_jit.branchTest32(JITCompiler::Zero, edx.gpr()));
                    numeratorPositive.link(&m_jit);
                }

                if (op1SaveGPR != op1Gpr)
                    unlock(op1SaveGPR);

                strictInt32Result(edx.gpr(), node);
                return;
            }
        }
#endif

        SpeculateInt32Operand op2(this, node->child2());
#if CPU(X86_64)
        // Fully general x86 path: idiv takes its dividend in edx:eax and
        // leaves the remainder in edx.
        GPRTemporary eax(this, X86Registers::eax);
        GPRTemporary edx(this, X86Registers::edx);
        GPRReg op1GPR = op1.gpr();
        GPRReg op2GPR = op2.gpr();

        GPRReg op2TempGPR;
        GPRReg temp;
        GPRReg op1SaveGPR;

        // The divisor must be moved out of eax/edx before dividing.
        if (op2GPR == X86Registers::eax || op2GPR == X86Registers::edx) {
            op2TempGPR = allocate();
            temp = op2TempGPR;
        } else {
            op2TempGPR = InvalidGPRReg;
            if (op1GPR == X86Registers::eax)
                temp = X86Registers::edx;
            else
                temp = X86Registers::eax;
        }

        // Keep a copy of the dividend for the negative-zero check below; the
        // division clobbers eax/edx.
        if (op1GPR == X86Registers::eax || op1GPR == X86Registers::edx) {
            op1SaveGPR = allocate();
            ASSERT(op1GPR != op1SaveGPR);
            m_jit.move(op1GPR, op1SaveGPR);
        } else
            op1SaveGPR = op1GPR;

        ASSERT(temp != op1GPR);
        ASSERT(temp != op2GPR);
        ASSERT(op1SaveGPR != X86Registers::eax);
        ASSERT(op1SaveGPR != X86Registers::edx);

        // Detect the two dangerous divisors (0 and -1) with one unsigned
        // compare: op2 + 1 is <= 1 (unsigned) exactly when op2 is 0 or -1.
        m_jit.add32(JITCompiler::TrustedImm32(1), op2GPR, temp);

        JITCompiler::Jump safeDenominator = m_jit.branch32(JITCompiler::Above, temp, JITCompiler::TrustedImm32(1));

        JITCompiler::JumpList done;

        // FIXME: -2^31 / -1 will actually yield negative zero, so we could have a
        // separate case for that. But it probably doesn't matter so much.
        if (shouldCheckOverflow(node->arithMode())) {
            // Checked mode: x % 0 and INT_MIN % -1 (idiv would fault) OSR exit.
            speculationCheck(Overflow, JSValueRegs(), nullptr, m_jit.branchTest32(JITCompiler::Zero, op2GPR));
            speculationCheck(Overflow, JSValueRegs(), nullptr, m_jit.branch32(JITCompiler::Equal, op1GPR, TrustedImm32(-2147483647-1)));
        } else {
            // This is the case where we convert the result to an int after we're done, and we
            // already know that the denominator is either -1 or 0. So, if the denominator is
            // zero, then the result should be zero. If the denominator is not zero (i.e. it's
            // -1) and the numerator is -2^31 then the result should be 0. Otherwise we are
            // happy to fall through to a normal division, since we're just dividing something
            // by negative 1.

            JITCompiler::Jump notZero = m_jit.branchTest32(JITCompiler::NonZero, op2GPR);
            m_jit.move(TrustedImm32(0), edx.gpr());
            done.append(m_jit.jump());

            notZero.link(&m_jit);
            JITCompiler::Jump notNeg2ToThe31 =
                m_jit.branch32(JITCompiler::NotEqual, op1GPR, TrustedImm32(-2147483647-1));
            m_jit.move(TrustedImm32(0), edx.gpr());
            done.append(m_jit.jump());

            notNeg2ToThe31.link(&m_jit);
        }

        safeDenominator.link(&m_jit);

        if (op2TempGPR != InvalidGPRReg) {
            m_jit.move(op2GPR, op2TempGPR);
            op2GPR = op2TempGPR;
        }

        m_jit.move(op1GPR, eax.gpr());
        // Sign-extend eax into edx:eax (cdq), then divide; remainder in edx.
        m_jit.x86ConvertToDoubleWord32();
        m_jit.x86Div32(op2GPR);

        if (op2TempGPR != InvalidGPRReg)
            unlock(op2TempGPR);

        // Check that we're not about to create negative zero.
        if (shouldCheckNegativeZero(node->arithMode())) {
            JITCompiler::Jump numeratorPositive = m_jit.branch32(JITCompiler::GreaterThanOrEqual, op1SaveGPR, TrustedImm32(0));
            speculationCheck(Overflow, JSValueRegs(), nullptr, m_jit.branchTest32(JITCompiler::Zero, edx.gpr()));
            numeratorPositive.link(&m_jit);
        }

        if (op1SaveGPR != op1GPR)
            unlock(op1SaveGPR);

        done.link(&m_jit);
        strictInt32Result(edx.gpr(), node);

#elif HAVE(ARM_IDIV_INSTRUCTIONS) || CPU(ARM64)
        // ARM has no remainder instruction, so synthesize it:
        // remainder = dividend - (dividend / divisor) * divisor.
        GPRTemporary temp(this);
        GPRTemporary quotientThenRemainder(this);
        GPRTemporary multiplyAnswer(this);
        GPRReg dividendGPR = op1.gpr();
        GPRReg divisorGPR = op2.gpr();
        GPRReg quotientThenRemainderGPR = quotientThenRemainder.gpr();
        GPRReg multiplyAnswerGPR = multiplyAnswer.gpr();

        JITCompiler::JumpList done;

        if (shouldCheckOverflow(node->arithMode()))
            speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchTest32(JITCompiler::Zero, divisorGPR));
        else {
            // Unchecked mode: x % 0 converts to 0 instead of exiting.
            JITCompiler::Jump denominatorNotZero = m_jit.branchTest32(JITCompiler::NonZero, divisorGPR);
            // We know that the low 32-bit of divisorGPR is 0, but we don't know if the high bits are.
            // So, use TrustedImm32(0) on ARM instead because done expects the result to be in DataFormatInt32.
            // Using an immediate 0 doesn't cost anything extra on ARM.
            m_jit.move(TrustedImm32(0), quotientThenRemainderGPR);
            done.append(m_jit.jump());
            denominatorNotZero.link(&m_jit);
        }

        m_jit.assembler().sdiv<32>(quotientThenRemainderGPR, dividendGPR, divisorGPR);
        // FIXME: It seems like there are cases where we don't need this? What if we have
        // arithMode() == Arith::Unchecked?
        // https://bugs.webkit.org/show_bug.cgi?id=126444
        speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchMul32(JITCompiler::Overflow, quotientThenRemainderGPR, divisorGPR, multiplyAnswerGPR));
#if HAVE(ARM_IDIV_INSTRUCTIONS)
        m_jit.assembler().sub(quotientThenRemainderGPR, dividendGPR, multiplyAnswerGPR);
#else
        m_jit.assembler().sub<32>(quotientThenRemainderGPR, dividendGPR, multiplyAnswerGPR);
#endif

        // If the user cares about negative zero, then speculate that we're not about
        // to produce negative zero.
        if (shouldCheckNegativeZero(node->arithMode())) {
            // Check that we're not about to create negative zero.
            JITCompiler::Jump numeratorPositive = m_jit.branch32(JITCompiler::GreaterThanOrEqual, dividendGPR, TrustedImm32(0));
            speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchTest32(JITCompiler::Zero, quotientThenRemainderGPR));
            numeratorPositive.link(&m_jit);
        }

        done.link(&m_jit);

        strictInt32Result(quotientThenRemainderGPR, node);
#else // not architecture that can do integer division
        RELEASE_ASSERT_NOT_REACHED();
#endif
        return;
    }

    case DoubleRepUse: {
        // Double modulo: defer to the C runtime's fmod.
        SpeculateDoubleOperand op1(this, node->child1());
        SpeculateDoubleOperand op2(this, node->child2());

        FPRReg op1FPR = op1.fpr();
        FPRReg op2FPR = op2.fpr();

        flushRegisters();

        FPRResult result(this);

        callOperation(Math::fmodDouble, result.fpr(), op1FPR, op2FPR);

        doubleResult(result.fpr(), node);
        return;
    }

    default:
        RELEASE_ASSERT_NOT_REACHED();
        return;
    }
}
6568
// Compiles ArithRound / ArithFloor / ArithCeil / ArithTrunc for both the
// DoubleRepUse fast path and the generic Untyped path.
void SpeculativeJIT::compileArithRounding(Node* node)
{
    if (node->child1().useKind() == DoubleRepUse) {
        SpeculateDoubleOperand value(this, node->child1());
        FPRReg valueFPR = value.fpr();

        // Shared epilogue: either convert the rounded double to a strict
        // int32 (OSR exiting on failure, including -0 when the rounding mode
        // cares), or hand the double back unchanged.
        auto setResult = [&] (FPRReg resultFPR) {
            if (producesInteger(node->arithRoundingMode())) {
                GPRTemporary roundedResultAsInt32(this);
                FPRTemporary scratch(this);
                FPRReg scratchFPR = scratch.fpr();
                GPRReg resultGPR = roundedResultAsInt32.gpr();
                JITCompiler::JumpList failureCases;
                m_jit.branchConvertDoubleToInt32(resultFPR, resultGPR, failureCases, scratchFPR, shouldCheckNegativeZero(node->arithRoundingMode()));
                speculationCheck(Overflow, JSValueRegs(), node, failureCases);

                strictInt32Result(resultGPR, node);
            } else
                doubleResult(resultFPR, node);
        };

        if (m_jit.supportsFloatingPointRounding()) {
            switch (node->op()) {
            case ArithRound: {
                FPRTemporary result(this);
                FPRReg resultFPR = result.fpr();
                if (producesInteger(node->arithRoundingMode()) && !shouldCheckNegativeZero(node->arithRoundingMode())) {
                    // When -0 doesn't matter, round-half-up is simply
                    // floor(x + 0.5).
                    static constexpr double halfConstant = 0.5;
                    m_jit.loadDouble(TrustedImmPtr(&halfConstant), resultFPR);
                    m_jit.addDouble(valueFPR, resultFPR);
                    m_jit.floorDouble(resultFPR, resultFPR);
                } else {
                    // Otherwise compute ceil(x), then subtract 1 when
                    // ceil(x) - 0.5 > x (i.e. x's fraction is below one
                    // half). Unlike floor(x + 0.5), this preserves the sign
                    // of zero.
                    m_jit.ceilDouble(valueFPR, resultFPR);

                    FPRTemporary scratch(this);
                    FPRReg scratchFPR = scratch.fpr();
                    static constexpr double halfConstant = -0.5;
                    m_jit.loadDouble(TrustedImmPtr(&halfConstant), scratchFPR);
                    m_jit.addDouble(resultFPR, scratchFPR);

                    JITCompiler::Jump shouldUseCeiled = m_jit.branchDouble(JITCompiler::DoubleLessThanOrEqualAndOrdered, scratchFPR, valueFPR);
                    static constexpr double oneConstant = -1.0;
                    m_jit.loadDouble(TrustedImmPtr(&oneConstant), scratchFPR);
                    m_jit.addDouble(scratchFPR, resultFPR);
                    shouldUseCeiled.link(&m_jit);
                }
                setResult(resultFPR);
                return;
            }

            case ArithFloor: {
                FPRTemporary rounded(this);
                FPRReg resultFPR = rounded.fpr();
                m_jit.floorDouble(valueFPR, resultFPR);
                setResult(resultFPR);
                return;
            }

            case ArithCeil: {
                FPRTemporary rounded(this);
                FPRReg resultFPR = rounded.fpr();
                m_jit.ceilDouble(valueFPR, resultFPR);
                setResult(resultFPR);
                return;
            }

            case ArithTrunc: {
                FPRTemporary rounded(this);
                FPRReg resultFPR = rounded.fpr();
                m_jit.roundTowardZeroDouble(valueFPR, resultFPR);
                setResult(resultFPR);
                return;
            }

            default:
                RELEASE_ASSERT_NOT_REACHED();
            }
        } else {
            // No hardware rounding instructions: call the math runtime.
            flushRegisters();
            FPRResult roundedResultAsDouble(this);
            FPRReg resultFPR = roundedResultAsDouble.fpr();
            if (node->op() == ArithRound)
                callOperation(Math::roundDouble, resultFPR, valueFPR);
            else if (node->op() == ArithFloor)
                callOperation(Math::floorDouble, resultFPR, valueFPR);
            else if (node->op() == ArithCeil)
                callOperation(Math::ceilDouble, resultFPR, valueFPR);
            else {
                ASSERT(node->op() == ArithTrunc);
                callOperation(Math::truncDouble, resultFPR, valueFPR);
            }
            setResult(resultFPR);
        }
        return;
    }

    DFG_ASSERT(m_jit.graph(), node, node->child1().useKind() == UntypedUse, node->child1().useKind());

    // Untyped path: the runtime operation performs ToNumber (which may throw)
    // followed by the rounding.
    JSValueOperand argument(this, node->child1());
    JSValueRegs argumentRegs = argument.jsValueRegs();

    flushRegisters();
    JSValueRegsFlushedCallResult result(this);
    JSValueRegs resultRegs = result.regs();
    J_JITOperation_GJ operation = nullptr;
    if (node->op() == ArithRound)
        operation = operationArithRound;
    else if (node->op() == ArithFloor)
        operation = operationArithFloor;
    else if (node->op() == ArithCeil)
        operation = operationArithCeil;
    else {
        ASSERT(node->op() == ArithTrunc);
        operation = operationArithTrunc;
    }
    callOperation(operation, resultRegs, TrustedImmPtr::weakPointer(m_graph, m_graph.globalObjectFor(node->origin.semantic)), argumentRegs);
    m_jit.exceptionCheck();
    jsValueResult(resultRegs, node);
}
6688
6689void SpeculativeJIT::compileArithUnary(Node* node)
6690{
6691 compileArithDoubleUnaryOp(node, arithUnaryFunction(node->arithUnaryType()), arithUnaryOperation(node->arithUnaryType()));
6692}
6693
6694void SpeculativeJIT::compileArithSqrt(Node* node)
6695{
6696 if (node->child1().useKind() == DoubleRepUse) {
6697 SpeculateDoubleOperand op1(this, node->child1());
6698 FPRReg op1FPR = op1.fpr();
6699
6700 if (!MacroAssembler::supportsFloatingPointSqrt() || !Options::useArchitectureSpecificOptimizations()) {
6701 flushRegisters();
6702 FPRResult result(this);
6703 callOperation(Math::sqrtDouble, result.fpr(), op1FPR);
6704 doubleResult(result.fpr(), node);
6705 } else {
6706 FPRTemporary result(this, op1);
6707 m_jit.sqrtDouble(op1.fpr(), result.fpr());
6708 doubleResult(result.fpr(), node);
6709 }
6710 return;
6711 }
6712
6713 JSValueOperand op1(this, node->child1());
6714 JSValueRegs op1Regs = op1.jsValueRegs();
6715 flushRegisters();
6716 FPRResult result(this);
6717 callOperation(operationArithSqrt, result.fpr(), TrustedImmPtr::weakPointer(m_graph, m_graph.globalObjectFor(node->origin.semantic)), op1Regs);
6718 m_jit.exceptionCheck();
6719 doubleResult(result.fpr(), node);
6720}
6721
// Compiles ArithMin / ArithMax for Int32 and DoubleRep operands. The double
// case carefully matches the spec's treatment of NaN and of +0 vs -0.
void SpeculativeJIT::compileArithMinMax(Node* node)
{
    switch (node->binaryUseKind()) {
    case Int32Use: {
        SpeculateStrictInt32Operand op1(this, node->child1());
        SpeculateStrictInt32Operand op2(this, node->child2());
        GPRTemporary result(this, Reuse, op1);

        GPRReg op1GPR = op1.gpr();
        GPRReg op2GPR = op2.gpr();
        GPRReg resultGPR = result.gpr();

        // Branch away when op1 wins the comparison; the fall-through picks op2.
        MacroAssembler::Jump op1Less = m_jit.branch32(node->op() == ArithMin ? MacroAssembler::LessThan : MacroAssembler::GreaterThan, op1GPR, op2GPR);
        m_jit.move(op2GPR, resultGPR);
        if (op1GPR != resultGPR) {
            MacroAssembler::Jump done = m_jit.jump();
            op1Less.link(&m_jit);
            m_jit.move(op1GPR, resultGPR);
            done.link(&m_jit);
        } else
            // result reuses op1's register, so "op1 wins" needs no move.
            op1Less.link(&m_jit);

        strictInt32Result(resultGPR, node);
        break;
    }

    case DoubleRepUse: {
        SpeculateDoubleOperand op1(this, node->child1());
        SpeculateDoubleOperand op2(this, node->child2());
        FPRTemporary result(this, op1);

        FPRReg op1FPR = op1.fpr();
        FPRReg op2FPR = op2.fpr();
        FPRReg resultFPR = result.fpr();

        MacroAssembler::JumpList done;

        MacroAssembler::Jump op1Less = m_jit.branchDouble(node->op() == ArithMin ? MacroAssembler::DoubleLessThanAndOrdered : MacroAssembler::DoubleGreaterThanAndOrdered, op1FPR, op2FPR);
        MacroAssembler::Jump opNotEqualOrUnordered = m_jit.branchDouble(MacroAssembler::DoubleNotEqualOrUnordered, op1FPR, op2FPR);

        // Here the operands compared equal, which for doubles can only mean
        // +0 vs -0 (or identical values). The spec for Math.min and Math.max
        // states that +0 is considered to be larger than -0: bitwise OR
        // selects -0 if either operand is -0 (min); bitwise AND selects +0
        // unless both are -0 (max).
        if (node->op() == ArithMin)
            m_jit.orDouble(op1FPR, op2FPR, resultFPR);
        else
            m_jit.andDouble(op1FPR, op2FPR, resultFPR);

        done.append(m_jit.jump());

        opNotEqualOrUnordered.link(&m_jit);
        // op2 is either the lesser one or one of them is NaN
        MacroAssembler::Jump op2Less = m_jit.branchDouble(node->op() == ArithMin ? MacroAssembler::DoubleGreaterThanAndOrdered : MacroAssembler::DoubleLessThanAndOrdered, op1FPR, op2FPR);

        // Unordered case. We don't know which of op1, op2 is NaN. Manufacture NaN by adding
        // op1 + op2 and putting it into result.
        m_jit.addDouble(op1FPR, op2FPR, resultFPR);
        done.append(m_jit.jump());

        op2Less.link(&m_jit);
        m_jit.moveDouble(op2FPR, resultFPR);

        if (op1FPR != resultFPR) {
            done.append(m_jit.jump());

            op1Less.link(&m_jit);
            m_jit.moveDouble(op1FPR, resultFPR);
        } else
            // result reuses op1's register, so "op1 wins" needs no move.
            op1Less.link(&m_jit);

        done.link(&m_jit);

        doubleResult(resultFPR, node);
        break;
    }

    default:
        DFG_CRASH(m_jit.graph(), node, "Bad use kind");
        break;
    }
}
6801
// For small positive integers, it is worth doing a tiny inline loop to exponentiate the base.
// Every register is clobbered by this helper. Returns the jump taken on
// fast-path success; the caller must link it past its slow-path fallback.
static MacroAssembler::Jump compileArithPowIntegerFastPath(JITCompiler& assembler, FPRReg xOperand, GPRReg yOperand, FPRReg result)
{
    // Only exponents in [0, maxExponentForIntegerMathPow] take the fast path;
    // the unsigned Above compare also rejects negative exponents.
    MacroAssembler::JumpList skipFastPath;
    skipFastPath.append(assembler.branch32(MacroAssembler::Above, yOperand, MacroAssembler::TrustedImm32(maxExponentForIntegerMathPow)));

    // Exponentiation by squaring: result starts at 1.0; each iteration
    // multiplies result by the base when the low exponent bit is set, then
    // squares the base and shifts the exponent right by one.
    static constexpr double oneConstant = 1.0;
    assembler.loadDouble(MacroAssembler::TrustedImmPtr(&oneConstant), result);

    MacroAssembler::Label startLoop(assembler.label());
    MacroAssembler::Jump exponentIsEven = assembler.branchTest32(MacroAssembler::Zero, yOperand, MacroAssembler::TrustedImm32(1));
    assembler.mulDouble(xOperand, result);
    exponentIsEven.link(&assembler);
    assembler.mulDouble(xOperand, xOperand);
    assembler.rshift32(MacroAssembler::TrustedImm32(1), yOperand);
    assembler.branchTest32(MacroAssembler::NonZero, yOperand).linkTo(startLoop, &assembler);

    MacroAssembler::Jump skipSlowPath = assembler.jump();
    skipFastPath.link(&assembler);

    return skipSlowPath;
}
6825
6826void SpeculativeJIT::compileValuePow(Node* node)
6827{
6828 Edge& leftChild = node->child1();
6829 Edge& rightChild = node->child2();
6830
6831 // FIXME: do we want a fast path for BigInt32 for Pow? I expect it would overflow pretty often.
6832 if (node->binaryUseKind() == HeapBigIntUse) {
6833 SpeculateCellOperand left(this, leftChild);
6834 SpeculateCellOperand right(this, rightChild);
6835 GPRReg leftGPR = left.gpr();
6836 GPRReg rightGPR = right.gpr();
6837
6838 speculateHeapBigInt(leftChild, leftGPR);
6839 speculateHeapBigInt(rightChild, rightGPR);
6840
6841 flushRegisters();
6842 JSValueRegsFlushedCallResult result(this);
6843 JSValueRegs resultRegs = result.regs();
6844
6845 callOperation(operationPowHeapBigInt, resultRegs, TrustedImmPtr::weakPointer(m_graph, m_graph.globalObjectFor(node->origin.semantic)), leftGPR, rightGPR);
6846
6847 m_jit.exceptionCheck();
6848 jsValueResult(resultRegs, node);
6849 return;
6850 }
6851
6852 DFG_ASSERT(m_jit.graph(), node, node->binaryUseKind() == UntypedUse || node->binaryUseKind() == AnyBigIntUse || node->binaryUseKind() == BigInt32Use, node->binaryUseKind());
6853
6854 JSValueOperand left(this, leftChild, ManualOperandSpeculation);
6855 JSValueOperand right(this, rightChild, ManualOperandSpeculation);
6856 speculate(node, leftChild);
6857 speculate(node, rightChild);
6858 JSValueRegs leftRegs = left.jsValueRegs();
6859 JSValueRegs rightRegs = right.jsValueRegs();
6860
6861 flushRegisters();
6862 JSValueRegsFlushedCallResult result(this);
6863 JSValueRegs resultRegs = result.regs();
6864 callOperation(operationValuePow, resultRegs, TrustedImmPtr::weakPointer(m_graph, m_graph.globalObjectFor(node->origin.semantic)), leftRegs, rightRegs);
6865 m_jit.exceptionCheck();
6866
6867 jsValueResult(resultRegs, node);
6868}
6869
// Emits code for ArithPow (double result). Three strategies, in order:
// 1. Int32 exponent: try the inline integer-exponent fast path
//    (compileArithPowIntegerFastPath); fall back to operationMathPow.
// 2. Constant exponent 0.5 / -0.5: inline sqrt / 1/sqrt with explicit
//    handling of the IEEE/ECMAScript special cases (base ±0 and -Infinity),
//    which plain sqrtDouble would get wrong (sqrt(-Inf) is NaN, but
//    pow(-Inf, 0.5) must be +Inf).
// 3. Otherwise: if the double exponent converts exactly to an int32, reuse
//    the integer fast path; else call operationMathPow.
void SpeculativeJIT::compileArithPow(Node* node)
{
    if (node->child2().useKind() == Int32Use) {
        SpeculateDoubleOperand xOperand(this, node->child1());
        SpeculateInt32Operand yOperand(this, node->child2());
        FPRReg xOperandfpr = xOperand.fpr();
        GPRReg yOperandGpr = yOperand.gpr();
        FPRTemporary yOperandfpr(this);

        flushRegisters();

        FPRResult result(this);
        FPRReg resultFpr = result.fpr();

        // The fast path clobbers its base register, so work on a copy.
        FPRTemporary xOperandCopy(this);
        FPRReg xOperandCopyFpr = xOperandCopy.fpr();
        m_jit.moveDouble(xOperandfpr, xOperandCopyFpr);

        // Likewise the exponent is consumed as a counter; copy it.
        GPRTemporary counter(this);
        GPRReg counterGpr = counter.gpr();
        m_jit.move(yOperandGpr, counterGpr);

        MacroAssembler::Jump skipFallback = compileArithPowIntegerFastPath(m_jit, xOperandCopyFpr, counterGpr, resultFpr);
        // Fallback: convert the exponent back to double and call the runtime.
        m_jit.convertInt32ToDouble(yOperandGpr, yOperandfpr.fpr());
        callOperation(operationMathPow, resultFpr, xOperandfpr, yOperandfpr.fpr());

        skipFallback.link(&m_jit);
        doubleResult(resultFpr, node);
        return;
    }

    if (node->child2()->isDoubleConstant()) {
        double exponent = node->child2()->asNumber();
        static constexpr double infinityConstant = std::numeric_limits<double>::infinity();
        static constexpr double minusInfinityConstant = -std::numeric_limits<double>::infinity();
        if (exponent == 0.5) {
            // pow(x, 0.5) == sqrt(x), except:
            //   pow(±0, 0.5)   -> +0   (resultFpr already holds +0 on that branch)
            //   pow(-Inf, 0.5) -> +Inf (sqrt(-Inf) would be NaN)
            SpeculateDoubleOperand xOperand(this, node->child1());
            FPRTemporary result(this);
            FPRReg xOperandFpr = xOperand.fpr();
            FPRReg resultFpr = result.fpr();

            m_jit.moveZeroToDouble(resultFpr);
            MacroAssembler::Jump xIsZeroOrNegativeZero = m_jit.branchDouble(MacroAssembler::DoubleEqualAndOrdered, xOperandFpr, resultFpr);

            m_jit.loadDouble(TrustedImmPtr(&minusInfinityConstant), resultFpr);
            MacroAssembler::Jump xIsMinusInfinity = m_jit.branchDouble(MacroAssembler::DoubleEqualAndOrdered, xOperandFpr, resultFpr);
            m_jit.sqrtDouble(xOperandFpr, resultFpr);
            MacroAssembler::Jump doneWithSqrt = m_jit.jump();

            xIsMinusInfinity.link(&m_jit);
            // resultFpr currently holds -Inf; produce +Inf. On x86 we load the
            // constant directly, elsewhere absDouble flips the sign bit.
            if (isX86())
                m_jit.loadDouble(TrustedImmPtr(&infinityConstant), resultFpr);
            else
                m_jit.absDouble(resultFpr, resultFpr);

            xIsZeroOrNegativeZero.link(&m_jit);
            doneWithSqrt.link(&m_jit);
            doubleResult(resultFpr, node);
            return;
        }
        if (exponent == -0.5) {
            // pow(x, -0.5) == 1 / sqrt(x), except:
            //   pow(±0, -0.5)   -> +Inf
            //   pow(-Inf, -0.5) -> +0
            SpeculateDoubleOperand xOperand(this, node->child1());
            FPRTemporary scratch(this);
            FPRTemporary result(this);
            FPRReg xOperandFpr = xOperand.fpr();
            FPRReg scratchFPR = scratch.fpr();
            FPRReg resultFpr = result.fpr();

            m_jit.moveZeroToDouble(resultFpr);
            MacroAssembler::Jump xIsZeroOrNegativeZero = m_jit.branchDouble(MacroAssembler::DoubleEqualAndOrdered, xOperandFpr, resultFpr);

            m_jit.loadDouble(TrustedImmPtr(&minusInfinityConstant), resultFpr);
            MacroAssembler::Jump xIsMinusInfinity = m_jit.branchDouble(MacroAssembler::DoubleEqualAndOrdered, xOperandFpr, resultFpr);

            static constexpr double oneConstant = 1.;
            m_jit.loadDouble(TrustedImmPtr(&oneConstant), resultFpr);
            m_jit.sqrtDouble(xOperandFpr, scratchFPR);
            m_jit.divDouble(resultFpr, scratchFPR, resultFpr);
            MacroAssembler::Jump doneWithSqrt = m_jit.jump();

            xIsZeroOrNegativeZero.link(&m_jit);
            m_jit.loadDouble(TrustedImmPtr(&infinityConstant), resultFpr);
            MacroAssembler::Jump doneWithBaseZero = m_jit.jump();

            xIsMinusInfinity.link(&m_jit);
            m_jit.moveZeroToDouble(resultFpr);

            doneWithBaseZero.link(&m_jit);
            doneWithSqrt.link(&m_jit);
            doubleResult(resultFpr, node);
            return;
        }
    }

    // Generic double^double case: if the exponent converts exactly to int32,
    // take the inline integer fast path; otherwise call operationMathPow.
    SpeculateDoubleOperand xOperand(this, node->child1());
    SpeculateDoubleOperand yOperand(this, node->child2());
    FPRReg xOperandfpr = xOperand.fpr();
    FPRReg yOperandfpr = yOperand.fpr();

    flushRegisters();

    FPRResult result(this);
    FPRReg resultFpr = result.fpr();

    FPRTemporary xOperandCopy(this);
    FPRReg xOperandCopyFpr = xOperandCopy.fpr();

    FPRTemporary scratch(this);
    FPRReg scratchFpr = scratch.fpr();

    GPRTemporary yOperandInteger(this);
    GPRReg yOperandIntegerGpr = yOperandInteger.gpr();
    MacroAssembler::JumpList failedExponentConversionToInteger;
    // 'false' => the conversion must be exact (no -0, no fraction) to proceed.
    m_jit.branchConvertDoubleToInt32(yOperandfpr, yOperandIntegerGpr, failedExponentConversionToInteger, scratchFpr, false);

    m_jit.moveDouble(xOperandfpr, xOperandCopyFpr);
    MacroAssembler::Jump skipFallback = compileArithPowIntegerFastPath(m_jit, xOperandCopyFpr, yOperandInteger.gpr(), resultFpr);
    failedExponentConversionToInteger.link(&m_jit);

    callOperation(operationMathPow, resultFpr, xOperandfpr, yOperandfpr);
    skipFallback.link(&m_jit);
    doubleResult(resultFpr, node);
}
6993
// Returns true if the compare is fused with a subsequent branch.
// Dispatches a relational/equality compare node on its children's use kinds,
// from the cheapest specialized paths down to the generic JSValue compare.
// The order of the checks establishes priority, so it must not be reordered.
bool SpeculativeJIT::compare(Node* node, MacroAssembler::RelationalCondition condition, MacroAssembler::DoubleCondition doubleCondition, S_JITOperation_GJJ operation)
{
    // First try to fuse this compare with an immediately following Branch.
    if (compilePeepHoleBranch(node, condition, doubleCondition, operation))
        return true;

    if (node->isBinaryUseKind(Int32Use)) {
        compileInt32Compare(node, condition);
        return false;
    }

#if USE(BIGINT32)
    if (node->isBinaryUseKind(BigInt32Use)) {
        compileBigInt32Compare(node, condition);
        return false;
    }
#endif

#if USE(JSVALUE64)
    if (node->isBinaryUseKind(Int52RepUse)) {
        compileInt52Compare(node, condition);
        return false;
    }
#endif // USE(JSVALUE64)

    if (node->isBinaryUseKind(DoubleRepUse)) {
        compileDoubleCompare(node, doubleCondition);
        return false;
    }

    if (node->isBinaryUseKind(StringUse)) {
        if (node->op() == CompareEq)
            compileStringEquality(node);
        else
            compileStringCompare(node, condition);
        return false;
    }

    if (node->isBinaryUseKind(StringIdentUse)) {
        if (node->op() == CompareEq)
            compileStringIdentEquality(node);
        else
            compileStringIdentCompare(node, condition);
        return false;
    }

    // FIXME: add HeapBigInt case here.
    // Not having it means that the compare will not be fused with the branch for this case.

    // The remaining specialized paths only exist for equality.
    if (node->op() == CompareEq) {
        if (node->isBinaryUseKind(BooleanUse)) {
            compileBooleanCompare(node, condition);
            return false;
        }

        if (node->isBinaryUseKind(SymbolUse)) {
            compileSymbolEquality(node);
            return false;
        }

        if (node->isBinaryUseKind(ObjectUse)) {
            compileObjectEquality(node);
            return false;
        }

        if (node->isBinaryUseKind(ObjectUse, ObjectOrOtherUse)) {
            compileObjectToObjectOrOtherEquality(node->child1(), node->child2());
            return false;
        }

        if (node->isBinaryUseKind(ObjectOrOtherUse, ObjectUse)) {
            compileObjectToObjectOrOtherEquality(node->child2(), node->child1());
            return false;
        }

        // If one side is statically known to be null/undefined, compare the
        // other side against null-or-undefined directly.
        if (!needsTypeCheck(node->child1(), SpecOther)) {
            nonSpeculativeNonPeepholeCompareNullOrUndefined(node->child2());
            return false;
        }

        if (!needsTypeCheck(node->child2(), SpecOther)) {
            nonSpeculativeNonPeepholeCompareNullOrUndefined(node->child1());
            return false;
        }
    }

    // Fully generic fallback: call the provided runtime operation.
    genericJSValueNonPeepholeCompare(node, condition, operation);
    return false;
}
7083
// Unsigned int32 compare: the caller passes an unsigned condition, so the
// plain int32 compare path does the right thing.
void SpeculativeJIT::compileCompareUnsigned(Node* node, MacroAssembler::RelationalCondition condition)
{
    compileInt32Compare(node, condition);
}
7088
// Emits code for CompareStrictEq. Returns true if the compare was fused with
// a following Branch (peephole): in that case the children are consumed here
// and m_indexInBlock/m_currentNode are advanced to the branch node so the
// main loop skips it. Each use-kind case first tries the peephole fusion,
// then falls back to producing a boolean result. Check order is significant.
bool SpeculativeJIT::compileStrictEq(Node* node)
{
    if (node->isBinaryUseKind(BooleanUse)) {
        unsigned branchIndexInBlock = detectPeepHoleBranch();
        if (branchIndexInBlock != UINT_MAX) {
            Node* branchNode = m_block->at(branchIndexInBlock);
            compilePeepHoleBooleanBranch(node, branchNode, MacroAssembler::Equal);
            use(node->child1());
            use(node->child2());
            m_indexInBlock = branchIndexInBlock;
            m_currentNode = branchNode;
            return true;
        }
        compileBooleanCompare(node, MacroAssembler::Equal);
        return false;
    }

    if (node->isBinaryUseKind(Int32Use)) {
        unsigned branchIndexInBlock = detectPeepHoleBranch();
        if (branchIndexInBlock != UINT_MAX) {
            Node* branchNode = m_block->at(branchIndexInBlock);
            compilePeepHoleInt32Branch(node, branchNode, MacroAssembler::Equal);
            use(node->child1());
            use(node->child2());
            m_indexInBlock = branchIndexInBlock;
            m_currentNode = branchNode;
            return true;
        }
        compileInt32Compare(node, MacroAssembler::Equal);
        return false;
    }

#if USE(BIGINT32)
    if (node->isBinaryUseKind(BigInt32Use)) {
        unsigned branchIndexInBlock = detectPeepHoleBranch();
        if (branchIndexInBlock != UINT_MAX) {
            Node* branchNode = m_block->at(branchIndexInBlock);
            compilePeepHoleBigInt32Branch(node, branchNode, MacroAssembler::Equal);
            use(node->child1());
            use(node->child2());
            m_indexInBlock = branchIndexInBlock;
            m_currentNode = branchNode;
            return true;
        }
        compileBigInt32Compare(node, MacroAssembler::Equal);
        return false;
    }
#endif

#if USE(JSVALUE64)
    if (node->isBinaryUseKind(Int52RepUse)) {
        unsigned branchIndexInBlock = detectPeepHoleBranch();
        if (branchIndexInBlock != UINT_MAX) {
            Node* branchNode = m_block->at(branchIndexInBlock);
            compilePeepHoleInt52Branch(node, branchNode, MacroAssembler::Equal);
            use(node->child1());
            use(node->child2());
            m_indexInBlock = branchIndexInBlock;
            m_currentNode = branchNode;
            return true;
        }
        compileInt52Compare(node, MacroAssembler::Equal);
        return false;
    }
#endif // USE(JSVALUE64)

    if (node->isBinaryUseKind(DoubleRepUse)) {
        // DoubleEqualAndOrdered: NaN compares unequal to everything, which is
        // the correct strict-equality semantics for doubles.
        unsigned branchIndexInBlock = detectPeepHoleBranch();
        if (branchIndexInBlock != UINT_MAX) {
            Node* branchNode = m_block->at(branchIndexInBlock);
            compilePeepHoleDoubleBranch(node, branchNode, MacroAssembler::DoubleEqualAndOrdered);
            use(node->child1());
            use(node->child2());
            m_indexInBlock = branchIndexInBlock;
            m_currentNode = branchNode;
            return true;
        }
        compileDoubleCompare(node, MacroAssembler::DoubleEqualAndOrdered);
        return false;
    }

    if (node->isBinaryUseKind(SymbolUse)) {
        unsigned branchIndexInBlock = detectPeepHoleBranch();
        if (branchIndexInBlock != UINT_MAX) {
            Node* branchNode = m_block->at(branchIndexInBlock);
            compilePeepHoleSymbolEquality(node, branchNode);
            use(node->child1());
            use(node->child2());
            m_indexInBlock = branchIndexInBlock;
            m_currentNode = branchNode;
            return true;
        }
        compileSymbolEquality(node);
        return false;
    }

#if !USE(BIGINT32)
    // With no inline BigInt32 representation, strict equality reduces to a
    // raw JSValue bit-compare once doubles are excluded on one side and
    // doubles/strings/HeapBigInts on the other.
    if (node->isBinaryUseKind(NotDoubleUse, NeitherDoubleNorHeapBigIntNorStringUse)) {
        Edge notDoubleChild = node->child1();
        Edge neitherDoubleNorHeapBigIntNorStringChild = node->child2();
        unsigned branchIndexInBlock = detectPeepHoleBranch();
        if (branchIndexInBlock != UINT_MAX) {
            Node* branchNode = m_block->at(branchIndexInBlock);
            compilePeepHoleNotDoubleNeitherDoubleNorHeapBigIntNorStringStrictEquality(node, branchNode, notDoubleChild, neitherDoubleNorHeapBigIntNorStringChild);
            use(notDoubleChild);
            use(neitherDoubleNorHeapBigIntNorStringChild);
            m_indexInBlock = branchIndexInBlock;
            m_currentNode = branchNode;
            return true;
        }
        compileNotDoubleNeitherDoubleNorHeapBigIntNorStringStrictEquality(node, notDoubleChild, neitherDoubleNorHeapBigIntNorStringChild);
        return false;
    }
    if (node->isBinaryUseKind(NeitherDoubleNorHeapBigIntNorStringUse, NotDoubleUse)) {
        // Mirror of the case above with the children swapped.
        Edge neitherDoubleNorHeapBigIntNorStringChild = node->child1();
        Edge notDoubleChild = node->child2();
        unsigned branchIndexInBlock = detectPeepHoleBranch();
        if (branchIndexInBlock != UINT_MAX) {
            Node* branchNode = m_block->at(branchIndexInBlock);
            compilePeepHoleNotDoubleNeitherDoubleNorHeapBigIntNorStringStrictEquality(node, branchNode, notDoubleChild, neitherDoubleNorHeapBigIntNorStringChild);
            use(notDoubleChild);
            use(neitherDoubleNorHeapBigIntNorStringChild);
            m_indexInBlock = branchIndexInBlock;
            m_currentNode = branchNode;
            return true;
        }
        compileNotDoubleNeitherDoubleNorHeapBigIntNorStringStrictEquality(node, notDoubleChild, neitherDoubleNorHeapBigIntNorStringChild);
        return false;
    }
#endif

    if (node->isBinaryUseKind(HeapBigIntUse)) {
        compileHeapBigIntEquality(node);
        return false;
    }

    if (node->isBinaryUseKind(SymbolUse, UntypedUse)) {
        compileSymbolUntypedEquality(node, node->child1(), node->child2());
        return false;
    }

    if (node->isBinaryUseKind(UntypedUse, SymbolUse)) {
        compileSymbolUntypedEquality(node, node->child2(), node->child1());
        return false;
    }

    if (node->isBinaryUseKind(StringUse)) {
        compileStringEquality(node);
        return false;
    }

    if (node->isBinaryUseKind(StringIdentUse)) {
        compileStringIdentEquality(node);
        return false;
    }

    if (node->isBinaryUseKind(ObjectUse, UntypedUse)) {
        unsigned branchIndexInBlock = detectPeepHoleBranch();
        if (branchIndexInBlock != UINT_MAX) {
            Node* branchNode = m_block->at(branchIndexInBlock);
            compilePeepHoleObjectStrictEquality(node->child1(), node->child2(), branchNode);
            use(node->child1());
            use(node->child2());
            m_indexInBlock = branchIndexInBlock;
            m_currentNode = branchNode;
            return true;
        }
        compileObjectStrictEquality(node->child1(), node->child2());
        return false;
    }

    if (node->isBinaryUseKind(UntypedUse, ObjectUse)) {
        unsigned branchIndexInBlock = detectPeepHoleBranch();
        if (branchIndexInBlock != UINT_MAX) {
            Node* branchNode = m_block->at(branchIndexInBlock);
            compilePeepHoleObjectStrictEquality(node->child2(), node->child1(), branchNode);
            use(node->child1());
            use(node->child2());
            m_indexInBlock = branchIndexInBlock;
            m_currentNode = branchNode;
            return true;
        }
        compileObjectStrictEquality(node->child2(), node->child1());
        return false;
    }

    if (node->isBinaryUseKind(ObjectUse)) {
        unsigned branchIndexInBlock = detectPeepHoleBranch();
        if (branchIndexInBlock != UINT_MAX) {
            Node* branchNode = m_block->at(branchIndexInBlock);
            compilePeepHoleObjectEquality(node, branchNode);
            use(node->child1());
            use(node->child2());
            m_indexInBlock = branchIndexInBlock;
            m_currentNode = branchNode;
            return true;
        }
        compileObjectEquality(node);
        return false;
    }

    if (node->isBinaryUseKind(MiscUse, UntypedUse)
        || node->isBinaryUseKind(UntypedUse, MiscUse)) {
        compileMiscStrictEq(node);
        return false;
    }

    if (node->isBinaryUseKind(StringIdentUse, NotStringVarUse)) {
        compileStringIdentToNotStringVarEquality(node, node->child1(), node->child2());
        return false;
    }

    if (node->isBinaryUseKind(NotStringVarUse, StringIdentUse)) {
        compileStringIdentToNotStringVarEquality(node, node->child2(), node->child1());
        return false;
    }

    if (node->isBinaryUseKind(StringUse, UntypedUse)) {
        compileStringToUntypedEquality(node, node->child1(), node->child2());
        return false;
    }

    if (node->isBinaryUseKind(UntypedUse, StringUse)) {
        compileStringToUntypedEquality(node, node->child2(), node->child1());
        return false;
    }

    // Fully generic fallback.
    ASSERT(node->isBinaryUseKind(UntypedUse) || node->isBinaryUseKind(AnyBigIntUse));
    return genericJSValueStrictEq(node);
}
7319
// Compares two speculated-boolean operands with a 32-bit compare and
// produces an unblessed (0/1) boolean result.
void SpeculativeJIT::compileBooleanCompare(Node* node, MacroAssembler::RelationalCondition condition)
{
    SpeculateBooleanOperand op1(this, node->child1());
    SpeculateBooleanOperand op2(this, node->child2());
    GPRTemporary result(this);

    m_jit.compare32(condition, op1.gpr(), op2.gpr(), result.gpr());

    unblessedBooleanResult(result.gpr(), node);
}
7330
// Int32 compare producing a 0/1 boolean. When either child is an int32
// constant, fold it into the compare as an immediate and reuse the other
// operand's register for the result; otherwise compare two registers.
void SpeculativeJIT::compileInt32Compare(Node* node, MacroAssembler::RelationalCondition condition)
{
    if (node->child1()->isInt32Constant()) {
        SpeculateInt32Operand op2(this, node->child2());
        GPRTemporary result(this, Reuse, op2);
        int32_t imm = node->child1()->asInt32();
        m_jit.compare32(condition, JITCompiler::Imm32(imm), op2.gpr(), result.gpr());

        unblessedBooleanResult(result.gpr(), node);
    } else if (node->child2()->isInt32Constant()) {
        SpeculateInt32Operand op1(this, node->child1());
        GPRTemporary result(this, Reuse, op1);
        int32_t imm = node->child2()->asInt32();
        m_jit.compare32(condition, op1.gpr(), JITCompiler::Imm32(imm), result.gpr());

        unblessedBooleanResult(result.gpr(), node);
    } else {
        SpeculateInt32Operand op1(this, node->child1());
        SpeculateInt32Operand op2(this, node->child2());
        GPRTemporary result(this, Reuse, op1, op2);
        m_jit.compare32(condition, op1.gpr(), op2.gpr(), result.gpr());

        unblessedBooleanResult(result.gpr(), node);
    }
}
7356
// Compares two double operands under the given floating-point condition
// (the caller chooses ordered/unordered semantics) and produces a 0/1
// boolean result.
void SpeculativeJIT::compileDoubleCompare(Node* node, MacroAssembler::DoubleCondition condition)
{
    SpeculateDoubleOperand op1(this, node->child1());
    SpeculateDoubleOperand op2(this, node->child2());
    GPRTemporary result(this);

    FPRReg op1FPR = op1.fpr();
    FPRReg op2FPR = op2.fpr();
    GPRReg resultGPR = result.gpr();

    m_jit.compareDouble(condition, op1FPR, op2FPR, resultGPR);

    unblessedBooleanResult(resultGPR, node);
}
7371
// Object equality is pointer equality. Both cells are type-checked to be
// objects; when the masquerades-as-undefined watchpoint is no longer valid,
// we additionally speculate that neither object has the
// MasqueradesAsUndefined type-info flag set, OSR-exiting otherwise.
void SpeculativeJIT::compileObjectEquality(Node* node)
{
    SpeculateCellOperand op1(this, node->child1());
    SpeculateCellOperand op2(this, node->child2());
    GPRTemporary result(this, Reuse, op1);

    GPRReg op1GPR = op1.gpr();
    GPRReg op2GPR = op2.gpr();
    GPRReg resultGPR = result.gpr();

    if (masqueradesAsUndefinedWatchpointIsStillValid()) {
        // Watchpoint guarantees no masquerader exists; object checks suffice.
        DFG_TYPE_CHECK(
            JSValueSource::unboxedCell(op1GPR), node->child1(), SpecObject, m_jit.branchIfNotObject(op1GPR));
        DFG_TYPE_CHECK(
            JSValueSource::unboxedCell(op2GPR), node->child2(), SpecObject, m_jit.branchIfNotObject(op2GPR));
    } else {
        DFG_TYPE_CHECK(
            JSValueSource::unboxedCell(op1GPR), node->child1(), SpecObject, m_jit.branchIfNotObject(op1GPR));
        speculationCheck(BadType, JSValueSource::unboxedCell(op1GPR), node->child1(),
            m_jit.branchTest8(
                MacroAssembler::NonZero,
                MacroAssembler::Address(op1GPR, JSCell::typeInfoFlagsOffset()),
                MacroAssembler::TrustedImm32(MasqueradesAsUndefined)));

        DFG_TYPE_CHECK(
            JSValueSource::unboxedCell(op2GPR), node->child2(), SpecObject, m_jit.branchIfNotObject(op2GPR));
        speculationCheck(BadType, JSValueSource::unboxedCell(op2GPR), node->child2(),
            m_jit.branchTest8(
                MacroAssembler::NonZero,
                MacroAssembler::Address(op2GPR, JSCell::typeInfoFlagsOffset()),
                MacroAssembler::TrustedImm32(MasqueradesAsUndefined)));
    }

    m_jit.comparePtr(MacroAssembler::Equal, op1GPR, op2GPR, resultGPR);
    unblessedBooleanResult(resultGPR, node);
}
7408
// Symbol equality is pointer equality of the Symbol cells; both operands are
// first speculated to be symbols.
void SpeculativeJIT::compileSymbolEquality(Node* node)
{
    SpeculateCellOperand left(this, node->child1());
    SpeculateCellOperand right(this, node->child2());
    GPRTemporary result(this, Reuse, left, right);

    GPRReg leftGPR = left.gpr();
    GPRReg rightGPR = right.gpr();
    GPRReg resultGPR = result.gpr();

    speculateSymbol(node->child1(), leftGPR);
    speculateSymbol(node->child2(), rightGPR);

    m_jit.comparePtr(JITCompiler::Equal, leftGPR, rightGPR, resultGPR);
    unblessedBooleanResult(resultGPR, node);
}
7425
// Fused symbol-equality + branch: compares the two Symbol cells by pointer
// and branches directly to the taken/notTaken blocks. When the taken block
// is the fall-through successor, branch on the inverted condition so the
// common path falls through.
void SpeculativeJIT::compilePeepHoleSymbolEquality(Node* node, Node* branchNode)
{
    SpeculateCellOperand left(this, node->child1());
    SpeculateCellOperand right(this, node->child2());

    GPRReg leftGPR = left.gpr();
    GPRReg rightGPR = right.gpr();

    speculateSymbol(node->child1(), leftGPR);
    speculateSymbol(node->child2(), rightGPR);

    BasicBlock* taken = branchNode->branchData()->taken.block;
    BasicBlock* notTaken = branchNode->branchData()->notTaken.block;

    if (taken == nextBlock()) {
        branchPtr(JITCompiler::NotEqual, leftGPR, rightGPR, notTaken);
        jump(taken);
    } else {
        branchPtr(JITCompiler::Equal, leftGPR, rightGPR, taken);
        jump(notTaken);
    }
}
7448
// Strict equality where the left child is speculated not-double and the right
// child is speculated to be neither a double nor a HeapBigInt nor a string.
// Under those exclusions, strict equality is equivalent to comparing the raw
// JSValue bits, so this emits a plain 64-bit compare (or tag+payload compare
// on 32-bit) and a 0/1 boolean result.
void SpeculativeJIT::compileNotDoubleNeitherDoubleNorHeapBigIntNorStringStrictEquality(Node* node, Edge notDoubleChild, Edge neitherDoubleNorHeapBigIntNorStringChild)
{
    JSValueOperand left(this, notDoubleChild, ManualOperandSpeculation);
    JSValueOperand right(this, neitherDoubleNorHeapBigIntNorStringChild, ManualOperandSpeculation);

    GPRTemporary temp(this);
#if USE(JSVALUE64)
    GPRTemporary result(this, Reuse, left, right);
#else
    GPRTemporary result(this);
#endif
    JSValueRegs leftRegs = left.jsValueRegs();
    JSValueRegs rightRegs = right.jsValueRegs();
    GPRReg tempGPR = temp.gpr();
    GPRReg resultGPR = result.gpr();

    speculateNotDouble(notDoubleChild, leftRegs, tempGPR);
    speculateNeitherDoubleNorHeapBigIntNorString(neitherDoubleNorHeapBigIntNorStringChild, rightRegs, tempGPR);

#if USE(JSVALUE64)
    m_jit.compare64(JITCompiler::Equal, left.gpr(), right.gpr(), result.gpr());
#else
    // 32-bit: result is 0 unless both tags and payloads match.
    m_jit.move(TrustedImm32(0), result.gpr());
    JITCompiler::Jump notEqual = m_jit.branch32(JITCompiler::NotEqual, left.tagGPR(), right.tagGPR());
    m_jit.compare32(JITCompiler::Equal, left.payloadGPR(), right.payloadGPR(), result.gpr());
    notEqual.link(&m_jit);
#endif
    unblessedBooleanResult(resultGPR, node);
}
7478
// Fused variant of the strict-equality-by-bit-compare above: after the same
// speculations, branch on raw JSValue equality straight to the taken/notTaken
// blocks, preferring fall-through when the taken block is next.
void SpeculativeJIT::compilePeepHoleNotDoubleNeitherDoubleNorHeapBigIntNorStringStrictEquality(Node*, Node* branchNode, Edge notDoubleChild, Edge neitherDoubleNorHeapBigIntNorStringChild)
{
    JSValueOperand left(this, notDoubleChild, ManualOperandSpeculation);
    JSValueOperand right(this, neitherDoubleNorHeapBigIntNorStringChild, ManualOperandSpeculation);

    GPRTemporary temp(this);
    JSValueRegs leftRegs = left.jsValueRegs();
    JSValueRegs rightRegs = right.jsValueRegs();
    GPRReg tempGPR = temp.gpr();

    speculateNotDouble(notDoubleChild, leftRegs, tempGPR);
    speculateNeitherDoubleNorHeapBigIntNorString(neitherDoubleNorHeapBigIntNorStringChild, rightRegs, tempGPR);

    BasicBlock* taken = branchNode->branchData()->taken.block;
    BasicBlock* notTaken = branchNode->branchData()->notTaken.block;

#if USE(JSVALUE64)
    if (taken == nextBlock()) {
        branch64(JITCompiler::NotEqual, left.gpr(), right.gpr(), notTaken);
        jump(taken);
    } else {
        branch64(JITCompiler::Equal, left.gpr(), right.gpr(), taken);
        jump(notTaken);
    }
#else
    // 32-bit: mismatched tags mean not equal; then compare payloads.
    branch32(JITCompiler::NotEqual, left.tagGPR(), right.tagGPR(), notTaken);
    if (taken == nextBlock()) {
        branch32(JITCompiler::NotEqual, left.payloadGPR(), right.payloadGPR(), notTaken);
        jump(taken);
    } else {
        branch32(JITCompiler::Equal, left.payloadGPR(), right.payloadGPR(), taken);
        jump(notTaken);
    }
#endif
}
7514
// Shared string-equality tail. Callers have already established that both
// operands are JSStrings (or handled the non-string outcomes via fastTrue /
// fastFalse). Inline path: resolve both StringImpls, compare lengths, then
// byte-compare 8-bit characters from the end toward index 0. Rope strings
// and non-8-bit strings bail to operationCompareStringEq via a slow path
// generator. The boolean result is materialized in leftTempGPR.
void SpeculativeJIT::compileStringEquality(
    Node* node, GPRReg leftGPR, GPRReg rightGPR, GPRReg lengthGPR, GPRReg leftTempGPR,
    GPRReg rightTempGPR, GPRReg leftTemp2GPR, GPRReg rightTemp2GPR,
    const JITCompiler::JumpList& fastTrue, const JITCompiler::JumpList& fastFalse)
{
    JITCompiler::JumpList trueCase;
    JITCompiler::JumpList falseCase;
    JITCompiler::JumpList slowCase;

    trueCase.append(fastTrue);
    falseCase.append(fastFalse);

    m_jit.loadPtr(MacroAssembler::Address(leftGPR, JSString::offsetOfValue()), leftTempGPR);
    m_jit.loadPtr(MacroAssembler::Address(rightGPR, JSString::offsetOfValue()), rightTempGPR);

    // Ropes have no flat character buffer; resolve them in the slow path.
    slowCase.append(m_jit.branchIfRopeStringImpl(leftTempGPR));
    slowCase.append(m_jit.branchIfRopeStringImpl(rightTempGPR));

    m_jit.load32(MacroAssembler::Address(leftTempGPR, StringImpl::lengthMemoryOffset()), lengthGPR);

    // Different lengths: definitely not equal.
    falseCase.append(m_jit.branch32(
        MacroAssembler::NotEqual,
        MacroAssembler::Address(rightTempGPR, StringImpl::lengthMemoryOffset()),
        lengthGPR));

    // Two empty strings are equal.
    trueCase.append(m_jit.branchTest32(MacroAssembler::Zero, lengthGPR));

    // Only 8-bit (Latin-1) strings are compared inline.
    slowCase.append(m_jit.branchTest32(
        MacroAssembler::Zero,
        MacroAssembler::Address(leftTempGPR, StringImpl::flagsOffset()),
        TrustedImm32(StringImpl::flagIs8Bit())));
    slowCase.append(m_jit.branchTest32(
        MacroAssembler::Zero,
        MacroAssembler::Address(rightTempGPR, StringImpl::flagsOffset()),
        TrustedImm32(StringImpl::flagIs8Bit())));

    m_jit.loadPtr(MacroAssembler::Address(leftTempGPR, StringImpl::dataOffset()), leftTempGPR);
    m_jit.loadPtr(MacroAssembler::Address(rightTempGPR, StringImpl::dataOffset()), rightTempGPR);

    // Compare one byte per iteration, walking lengthGPR down from length-1 to 0.
    MacroAssembler::Label loop = m_jit.label();

    m_jit.sub32(TrustedImm32(1), lengthGPR);

    // This isn't going to generate the best code on x86. But that's OK, it's still better
    // than not inlining.
    m_jit.load8(MacroAssembler::BaseIndex(leftTempGPR, lengthGPR, MacroAssembler::TimesOne), leftTemp2GPR);
    m_jit.load8(MacroAssembler::BaseIndex(rightTempGPR, lengthGPR, MacroAssembler::TimesOne), rightTemp2GPR);
    falseCase.append(m_jit.branch32(MacroAssembler::NotEqual, leftTemp2GPR, rightTemp2GPR));

    m_jit.branchTest32(MacroAssembler::NonZero, lengthGPR).linkTo(loop, &m_jit);

    trueCase.link(&m_jit);
    moveTrueTo(leftTempGPR);

    JITCompiler::Jump done = m_jit.jump();

    falseCase.link(&m_jit);
    moveFalseTo(leftTempGPR);

    done.link(&m_jit);
    addSlowPathGenerator(
        slowPathCall(
            slowCase, this, operationCompareStringEq, leftTempGPR, TrustedImmPtr::weakPointer(m_graph, m_graph.globalObjectFor(node->origin.semantic)), leftGPR, rightGPR));

    blessedBooleanResult(leftTempGPR, node);
}
7581
// String == String: speculate the left child, short-circuit to true on
// pointer identity (which also proves the right side is a string), then
// speculate the right child and defer to the shared equality tail.
void SpeculativeJIT::compileStringEquality(Node* node)
{
    SpeculateCellOperand left(this, node->child1());
    SpeculateCellOperand right(this, node->child2());
    GPRTemporary length(this);
    GPRTemporary leftTemp(this);
    GPRTemporary rightTemp(this);
    GPRTemporary leftTemp2(this, Reuse, left);
    GPRTemporary rightTemp2(this, Reuse, right);

    GPRReg leftGPR = left.gpr();
    GPRReg rightGPR = right.gpr();
    GPRReg lengthGPR = length.gpr();
    GPRReg leftTempGPR = leftTemp.gpr();
    GPRReg rightTempGPR = rightTemp.gpr();
    GPRReg leftTemp2GPR = leftTemp2.gpr();
    GPRReg rightTemp2GPR = rightTemp2.gpr();

    speculateString(node->child1(), leftGPR);

    // It's safe to branch around the type check below, since proving that the values are
    // equal does indeed prove that the right value is a string.
    JITCompiler::Jump fastTrue = m_jit.branchPtr(MacroAssembler::Equal, leftGPR, rightGPR);

    speculateString(node->child2(), rightGPR);

    compileStringEquality(
        node, leftGPR, rightGPR, lengthGPR, leftTempGPR, rightTempGPR, leftTemp2GPR,
        rightTemp2GPR, fastTrue, JITCompiler::Jump());
}
7612
// String == Untyped: the untyped side is filtered without OSR exit —
// non-cell => false, pointer-identical to the string => true, cell but not a
// string => false. Only when it is a distinct string do we run the shared
// character-comparison tail.
void SpeculativeJIT::compileStringToUntypedEquality(Node* node, Edge stringEdge, Edge untypedEdge)
{
    SpeculateCellOperand left(this, stringEdge);
    JSValueOperand right(this, untypedEdge, ManualOperandSpeculation);
    GPRTemporary length(this);
    GPRTemporary leftTemp(this);
    GPRTemporary rightTemp(this);
    GPRTemporary leftTemp2(this, Reuse, left);
    GPRTemporary rightTemp2(this);

    GPRReg leftGPR = left.gpr();
    JSValueRegs rightRegs = right.jsValueRegs();
    GPRReg lengthGPR = length.gpr();
    GPRReg leftTempGPR = leftTemp.gpr();
    GPRReg rightTempGPR = rightTemp.gpr();
    GPRReg leftTemp2GPR = leftTemp2.gpr();
    GPRReg rightTemp2GPR = rightTemp2.gpr();

    speculateString(stringEdge, leftGPR);

    JITCompiler::JumpList fastTrue;
    JITCompiler::JumpList fastFalse;

    fastFalse.append(m_jit.branchIfNotCell(rightRegs));

    // It's safe to branch around the type check below, since proving that the values are
    // equal does indeed prove that the right value is a string.
    fastTrue.append(m_jit.branchPtr(
        MacroAssembler::Equal, leftGPR, rightRegs.payloadGPR()));

    fastFalse.append(m_jit.branchIfNotString(rightRegs.payloadGPR()));

    compileStringEquality(
        node, leftGPR, rightRegs.payloadGPR(), lengthGPR, leftTempGPR, rightTempGPR, leftTemp2GPR,
        rightTemp2GPR, fastTrue, fastFalse);
}
7649
// StringIdent == StringIdent: both operands are speculated to be strings
// whose StringImpls are atomized (speculateStringIdentAndLoadStorage), so
// equality reduces to comparing the StringImpl pointers.
void SpeculativeJIT::compileStringIdentEquality(Node* node)
{
    SpeculateCellOperand left(this, node->child1());
    SpeculateCellOperand right(this, node->child2());
    GPRTemporary leftTemp(this);
    GPRTemporary rightTemp(this);

    GPRReg leftGPR = left.gpr();
    GPRReg rightGPR = right.gpr();
    GPRReg leftTempGPR = leftTemp.gpr();
    GPRReg rightTempGPR = rightTemp.gpr();

    speculateString(node->child1(), leftGPR);
    speculateString(node->child2(), rightGPR);

    speculateStringIdentAndLoadStorage(node->child1(), leftGPR, leftTempGPR);
    speculateStringIdentAndLoadStorage(node->child2(), rightGPR, rightTempGPR);

    m_jit.comparePtr(MacroAssembler::Equal, leftTempGPR, rightTempGPR, leftTempGPR);

    unblessedBooleanResult(leftTempGPR, node);
}
7672
// StringIdent == NotStringVar: the result defaults to false (rightTempGPR is
// pre-set to false and the non-string branches jump past the compare). If
// the untyped side turns out to be a string, load its atomized StringImpl
// and compare impl pointers against the left side's.
void SpeculativeJIT::compileStringIdentToNotStringVarEquality(
    Node* node, Edge stringEdge, Edge notStringVarEdge)
{
    SpeculateCellOperand left(this, stringEdge);
    JSValueOperand right(this, notStringVarEdge, ManualOperandSpeculation);
    GPRTemporary leftTemp(this);
    GPRTemporary rightTemp(this);
    GPRReg leftTempGPR = leftTemp.gpr();
    GPRReg rightTempGPR = rightTemp.gpr();
    GPRReg leftGPR = left.gpr();
    JSValueRegs rightRegs = right.jsValueRegs();

    speculateString(stringEdge, leftGPR);
    speculateStringIdentAndLoadStorage(stringEdge, leftGPR, leftTempGPR);

    // Pre-load false; a non-string right side skips the compare below.
    moveFalseTo(rightTempGPR);
    JITCompiler::JumpList notString;
    notString.append(m_jit.branchIfNotCell(rightRegs));
    notString.append(m_jit.branchIfNotString(rightRegs.payloadGPR()));

    speculateStringIdentAndLoadStorage(notStringVarEdge, rightRegs.payloadGPR(), rightTempGPR);

    m_jit.comparePtr(MacroAssembler::Equal, leftTempGPR, rightTempGPR, rightTempGPR);
    notString.link(&m_jit);

    unblessedBooleanResult(rightTempGPR, node);
}
7700
// Relational compare (<, <=, >, >=) of two speculated strings: selects the
// matching operationCompareString* runtime function and calls it. The call
// can throw (e.g. resolving ropes can OOM), hence the exception check.
void SpeculativeJIT::compileStringCompare(Node* node, MacroAssembler::RelationalCondition condition)
{
    SpeculateCellOperand left(this, node->child1());
    SpeculateCellOperand right(this, node->child2());
    GPRReg leftGPR = left.gpr();
    GPRReg rightGPR = right.gpr();

    speculateString(node->child1(), leftGPR);
    speculateString(node->child2(), rightGPR);

    C_JITOperation_B_GJssJss compareFunction = nullptr;
    if (condition == MacroAssembler::LessThan)
        compareFunction = operationCompareStringLess;
    else if (condition == MacroAssembler::LessThanOrEqual)
        compareFunction = operationCompareStringLessEq;
    else if (condition == MacroAssembler::GreaterThan)
        compareFunction = operationCompareStringGreater;
    else if (condition == MacroAssembler::GreaterThanOrEqual)
        compareFunction = operationCompareStringGreaterEq;
    else
        RELEASE_ASSERT_NOT_REACHED();

    GPRFlushedCallResult result(this);
    GPRReg resultGPR = result.gpr();

    flushRegisters();
    callOperation(compareFunction, resultGPR, TrustedImmPtr::weakPointer(m_graph, m_graph.globalObjectFor(node->origin.semantic)), leftGPR, rightGPR);
    m_jit.exceptionCheck();

    unblessedBooleanResult(resultGPR, node);
}
7732
7733void SpeculativeJIT::compileStringIdentCompare(Node* node, MacroAssembler::RelationalCondition condition)
7734{
7735 SpeculateCellOperand left(this, node->child1());
7736 SpeculateCellOperand right(this, node->child2());
7737 GPRFlushedCallResult result(this);
7738 GPRTemporary leftTemp(this);
7739 GPRTemporary rightTemp(this);
7740
7741 GPRReg leftGPR = left.gpr();
7742 GPRReg rightGPR = right.gpr();
7743 GPRReg resultGPR = result.gpr();
7744 GPRReg leftTempGPR = leftTemp.gpr();
7745 GPRReg rightTempGPR = rightTemp.gpr();
7746
7747 speculateString(node->child1(), leftGPR);
7748 speculateString(node->child2(), rightGPR);
7749
7750 C_JITOperation_TT compareFunction = nullptr;
7751 if (condition == MacroAssembler::LessThan)
7752 compareFunction = operationCompareStringImplLess;
7753 else if (condition == MacroAssembler::LessThanOrEqual)
7754 compareFunction = operationCompareStringImplLessEq;
7755 else if (condition == MacroAssembler::GreaterThan)
7756 compareFunction = operationCompareStringImplGreater;
7757 else if (condition == MacroAssembler::GreaterThanOrEqual)
7758 compareFunction = operationCompareStringImplGreaterEq;
7759 else
7760 RELEASE_ASSERT_NOT_REACHED();
7761
7762 speculateStringIdentAndLoadStorage(node->child1(), leftGPR, leftTempGPR);
7763 speculateStringIdentAndLoadStorage(node->child2(), rightGPR, rightTempGPR);
7764
7765 flushRegisters();
7766 callOperation(compareFunction, resultGPR, leftTempGPR, rightTempGPR);
7767
7768 unblessedBooleanResult(resultGPR, node);
7769}
7770
void SpeculativeJIT::compileSameValue(Node* node)
{
    // Implements SameValue(a, b). For DoubleRepUse, the check is done inline:
    // two doubles are SameValue-equal iff their bit patterns match (which
    // correctly distinguishes +0 from -0) or both are NaN. For UntypedUse,
    // defer to the runtime.
    if (node->isBinaryUseKind(DoubleRepUse)) {
        SpeculateDoubleOperand arg1(this, node->child1());
        SpeculateDoubleOperand arg2(this, node->child2());
        GPRTemporary result(this);
        GPRTemporary temp(this);
        GPRTemporary temp2(this);

        FPRReg arg1FPR = arg1.fpr();
        FPRReg arg2FPR = arg2.fpr();
        GPRReg resultGPR = result.gpr();
        GPRReg tempGPR = temp.gpr();
        GPRReg temp2GPR = temp2.gpr();

#if USE(JSVALUE64)
        // 64-bit: compare the raw bit patterns in one shot.
        m_jit.moveDoubleTo64(arg1FPR, tempGPR);
        m_jit.moveDoubleTo64(arg2FPR, temp2GPR);
        auto trueCase = m_jit.branch64(CCallHelpers::Equal, tempGPR, temp2GPR);
#else
        // 32-bit: compare the two halves of each double separately.
        GPRTemporary temp3(this);
        GPRReg temp3GPR = temp3.gpr();

        m_jit.moveDoubleToInts(arg1FPR, tempGPR, temp2GPR);
        m_jit.moveDoubleToInts(arg2FPR, temp3GPR, resultGPR);
        auto notEqual = m_jit.branch32(CCallHelpers::NotEqual, tempGPR, temp3GPR);
        auto trueCase = m_jit.branch32(CCallHelpers::Equal, temp2GPR, resultGPR);
        notEqual.link(&m_jit);
#endif

        // Bit patterns differ: the only remaining equal case is NaN vs. NaN.
        // compareDouble(NotEqualOrUnordered, x, x) produces 1 iff x is NaN,
        // so AND-ing the two self-compares yields 1 iff both args are NaN.
        m_jit.compareDouble(CCallHelpers::DoubleNotEqualOrUnordered, arg1FPR, arg1FPR, tempGPR);
        m_jit.compareDouble(CCallHelpers::DoubleNotEqualOrUnordered, arg2FPR, arg2FPR, temp2GPR);
        m_jit.and32(tempGPR, temp2GPR, resultGPR);
        auto done = m_jit.jump();

        trueCase.link(&m_jit);
        m_jit.move(CCallHelpers::TrustedImm32(1), resultGPR);
        done.link(&m_jit);

        unblessedBooleanResult(resultGPR, node);
        return;
    }

    ASSERT(node->isBinaryUseKind(UntypedUse));

    // Generic path: hand both values to operationSameValue.
    JSValueOperand arg1(this, node->child1());
    JSValueOperand arg2(this, node->child2());
    JSValueRegs arg1Regs = arg1.jsValueRegs();
    JSValueRegs arg2Regs = arg2.jsValueRegs();

    arg1.use();
    arg2.use();

    flushRegisters();

    GPRFlushedCallResult result(this);
    GPRReg resultGPR = result.gpr();
    callOperation(operationSameValue, resultGPR, TrustedImmPtr::weakPointer(m_graph, m_graph.globalObjectFor(node->origin.semantic)), arg1Regs, arg2Regs);
    m_jit.exceptionCheck();

    unblessedBooleanResult(resultGPR, node, UseChildrenCalledExplicitly);
}
7833
7834void SpeculativeJIT::compileToBooleanString(Node* node, bool invert)
7835{
7836 SpeculateCellOperand str(this, node->child1());
7837 GPRReg strGPR = str.gpr();
7838
7839 // Make sure that this is a string.
7840 speculateString(node->child1(), strGPR);
7841
7842 GPRTemporary eq(this);
7843 GPRReg eqGPR = eq.gpr();
7844
7845 m_jit.move(TrustedImmPtr::weakPointer(m_jit.graph(), jsEmptyString(vm())), eqGPR);
7846 m_jit.comparePtr(invert ? CCallHelpers::Equal : CCallHelpers::NotEqual, strGPR, eqGPR, eqGPR);
7847 unblessedBooleanResult(eqGPR, node);
7848}
7849
void SpeculativeJIT::compileToBooleanStringOrOther(Node* node, bool invert)
{
    // ToBoolean for a value speculated to be a string or "other"
    // (null/undefined): strings are truthy unless empty, null/undefined are
    // always falsy. `invert` flips the result.
    JSValueOperand value(this, node->child1(), ManualOperandSpeculation);
    GPRTemporary temp(this);
    JSValueRegs valueRegs = value.jsValueRegs();
    GPRReg tempGPR = temp.gpr();

    JITCompiler::Jump notCell = m_jit.branchIfNotCell(valueRegs);
    GPRReg cellGPR = valueRegs.payloadGPR();
    // Cell case: OSR-exit unless the cell is a string.
    DFG_TYPE_CHECK(
        valueRegs, node->child1(), (~SpecCellCheck) | SpecString, m_jit.branchIfNotString(cellGPR));

    // A string is truthy iff it is not the VM's empty string (pointer compare).
    m_jit.move(TrustedImmPtr::weakPointer(m_jit.graph(), jsEmptyString(vm())), tempGPR);
    m_jit.comparePtr(invert ? CCallHelpers::Equal : CCallHelpers::NotEqual, cellGPR, tempGPR, tempGPR);
    auto done = m_jit.jump();

    notCell.link(&m_jit);
    // Non-cell case: OSR-exit unless the value is "other" (null/undefined),
    // which converts to false (true when inverted).
    DFG_TYPE_CHECK(
        valueRegs, node->child1(), SpecCellCheck | SpecOther, m_jit.branchIfNotOther(valueRegs, tempGPR));
    m_jit.move(invert ? TrustedImm32(1) : TrustedImm32(0), tempGPR);

    done.link(&m_jit);
    unblessedBooleanResult(tempGPR, node);
}
7874
7875void SpeculativeJIT::emitStringBranch(Edge nodeUse, BasicBlock* taken, BasicBlock* notTaken)
7876{
7877 SpeculateCellOperand str(this, nodeUse);
7878
7879 GPRReg strGPR = str.gpr();
7880
7881 speculateString(nodeUse, strGPR);
7882
7883 branchPtr(CCallHelpers::Equal, strGPR, TrustedImmPtr::weakPointer(m_jit.graph(), jsEmptyString(vm())), notTaken);
7884 jump(taken);
7885
7886 noResult(m_currentNode);
7887}
7888
void SpeculativeJIT::emitStringOrOtherBranch(Edge nodeUse, BasicBlock* taken, BasicBlock* notTaken)
{
    // Branch form of ToBoolean for a string-or-other (null/undefined) value:
    // non-empty strings go to taken; the empty string and null/undefined go
    // to notTaken.
    JSValueOperand value(this, nodeUse, ManualOperandSpeculation);
    GPRTemporary temp(this);
    JSValueRegs valueRegs = value.jsValueRegs();
    GPRReg tempGPR = temp.gpr();

    JITCompiler::Jump notCell = m_jit.branchIfNotCell(valueRegs);
    GPRReg cellGPR = valueRegs.payloadGPR();
    // Cell case: OSR-exit unless the cell is a string.
    DFG_TYPE_CHECK(valueRegs, nodeUse, (~SpecCellCheck) | SpecString, m_jit.branchIfNotString(cellGPR));

    // Empty string (pointer compare against the VM's jsEmptyString()) is falsy.
    branchPtr(CCallHelpers::Equal, cellGPR, TrustedImmPtr::weakPointer(m_jit.graph(), jsEmptyString(vm())), notTaken);
    // ForceJump: the notCell code follows, so the jump to `taken` must be
    // emitted even if `taken` would otherwise be the fall-through block.
    jump(taken, ForceJump);

    notCell.link(&m_jit);
    // Non-cell case: OSR-exit unless the value is "other" (null/undefined),
    // which is always falsy.
    DFG_TYPE_CHECK(
        valueRegs, nodeUse, SpecCellCheck | SpecOther, m_jit.branchIfNotOther(valueRegs, tempGPR));
    jump(notTaken);
    noResult(m_currentNode);
}
7909
7910void SpeculativeJIT::compileConstantStoragePointer(Node* node)
7911{
7912 GPRTemporary storage(this);
7913 GPRReg storageGPR = storage.gpr();
7914 m_jit.move(TrustedImmPtr(node->storagePointer()), storageGPR);
7915 storageResult(storageGPR, node);
7916}
7917
void SpeculativeJIT::cageTypedArrayStorage(GPRReg baseReg, GPRReg storageReg, bool validateAuth)
{
    // Sanitizes a typed-array vector pointer freshly loaded from the heap.
    // On ARM64E the pointer is authenticated and must be untagged, using the
    // view's length (loaded from baseReg) as the modifier. When the Gigacage
    // is enabled, the pointer is additionally forced into the primitive
    // Gigacage.
    auto untagArrayPtr = [&]() {
#if CPU(ARM64E)
        m_jit.untagArrayPtrLength32(MacroAssembler::Address(baseReg, JSArrayBufferView::offsetOfLength()), storageReg, validateAuth);
#else
        UNUSED_PARAM(validateAuth);
        UNUSED_PARAM(baseReg);
        UNUSED_PARAM(storageReg);
#endif
    };

#if GIGACAGE_ENABLED
    UNUSED_PARAM(baseReg);
    // Gigacage compiled in but disabled at runtime: untagging is all we do.
    if (!Gigacage::shouldBeEnabled()) {
        untagArrayPtr();
        return;
    }

    // If the primitive Gigacage can still be disabled at runtime, caging is
    // only sound while the corresponding watchpoint holds: register it lazily
    // if it is still valid, otherwise skip caging and just untag.
    if (!Gigacage::disablingPrimitiveGigacageIsForbidden()) {
        VM& vm = this->vm();
        if (vm.primitiveGigacageEnabled().isStillValid())
            m_jit.graph().watchpoints().addLazily(vm.primitiveGigacageEnabled());
        else {
            untagArrayPtr();
            return;
        }
    }

    m_jit.cageWithoutUntagging(Gigacage::Primitive, storageReg);
#endif
    untagArrayPtr();
}
7951
void SpeculativeJIT::compileGetIndexedPropertyStorage(Node* node)
{
    // Produces the raw indexed-storage pointer: the character data for a
    // string, or the (untagged/caged) vector for a typed array view.
    SpeculateCellOperand base(this, node->child1());
    GPRReg baseReg = base.gpr();

    GPRTemporary storage(this);
    GPRReg storageReg = storage.gpr();

    switch (node->arrayMode().type()) {
    case Array::String:
        m_jit.loadPtr(MacroAssembler::Address(baseReg, JSString::offsetOfValue()), storageReg);

        // Ropes have no contiguous character data. If the value slot holds a
        // rope impl, take a slow path that resolves (flattens) the rope and
        // leaves a resolved StringImpl* in storageReg.
        addSlowPathGenerator(
            slowPathCall(
                m_jit.branchIfRopeStringImpl(storageReg),
                this, operationResolveRope, storageReg, TrustedImmPtr::weakPointer(m_graph, m_graph.globalObjectFor(node->origin.semantic)), baseReg));

        // Either way storageReg now holds a StringImpl*; fetch its data pointer.
        m_jit.loadPtr(MacroAssembler::Address(storageReg, StringImpl::dataOffset()), storageReg);
        break;

    default: {
        // All other array modes reaching here must be typed array views.
        auto typedArrayType = node->arrayMode().typedArrayType();
        ASSERT_UNUSED(typedArrayType, isTypedView(typedArrayType));

        m_jit.loadPtr(JITCompiler::Address(baseReg, JSArrayBufferView::offsetOfVector()), storageReg);
        // Untag (ARM64E) and cage (Gigacage) the freshly loaded vector pointer.
        cageTypedArrayStorage(baseReg, storageReg);
        break;
    }
    }

    storageResult(storageReg, node);
}
7984
void SpeculativeJIT::compileGetTypedArrayByteOffset(Node* node)
{
    // Computes a typed array view's byteOffset inline. For a wasteful view it
    // is (vector - arrayBuffer->data()); for every other mode, or a null
    // vector, the offset is 0.
    SpeculateCellOperand base(this, node->child1());
    GPRTemporary vector(this);
    GPRTemporary data(this);

    GPRReg baseGPR = base.gpr();
    GPRReg vectorGPR = vector.gpr();
    GPRReg dataGPR = data.gpr();
    ASSERT(baseGPR != vectorGPR);
    ASSERT(baseGPR != dataGPR);
    ASSERT(vectorGPR != dataGPR);

    // dataGPR is reused for the ArrayBuffer pointer once the butterfly has
    // been consumed.
    GPRReg arrayBufferGPR = dataGPR;

    // Only wasteful views carry a non-zero byte offset.
    JITCompiler::Jump emptyByteOffset = m_jit.branch32(
        MacroAssembler::NotEqual,
        MacroAssembler::Address(baseGPR, JSArrayBufferView::offsetOfMode()),
        TrustedImm32(WastefulTypedArray));

    m_jit.loadPtr(MacroAssembler::Address(baseGPR, JSArrayBufferView::offsetOfVector()), vectorGPR);

    JITCompiler::Jump nullVector = m_jit.branchPtr(JITCompiler::Equal, vectorGPR, TrustedImmPtr(JSArrayBufferView::nullVectorPtr()));

    // Chase butterfly -> ArrayBuffer -> data pointer.
    m_jit.loadPtr(MacroAssembler::Address(baseGPR, JSObject::butterflyOffset()), dataGPR);
    m_jit.cageWithoutUntagging(Gigacage::JSValue, dataGPR);

    cageTypedArrayStorage(baseGPR, vectorGPR);

    m_jit.loadPtr(MacroAssembler::Address(dataGPR, Butterfly::offsetOfArrayBuffer()), arrayBufferGPR);
    // FIXME: This needs caging.
    // https://bugs.webkit.org/show_bug.cgi?id=175515
    m_jit.loadPtr(MacroAssembler::Address(arrayBufferGPR, ArrayBuffer::offsetOfData()), dataGPR);
#if CPU(ARM64E)
    m_jit.removeArrayPtrTag(dataGPR);
#endif

    // byteOffset = vector - buffer data pointer.
    m_jit.subPtr(dataGPR, vectorGPR);

    JITCompiler::Jump done = m_jit.jump();

#if CPU(ARM64E)
    // On ARM64E the null vector must take the explicit zeroing path
    // (presumably it is not the literal null pointer there — see the
    // contrasting ASSERT below for the non-ARM64E case).
    nullVector.link(&m_jit);
#endif
    emptyByteOffset.link(&m_jit);
    m_jit.move(TrustedImmPtr(nullptr), vectorGPR);

    done.link(&m_jit);
#if !CPU(ARM64E)
    // Elsewhere the null vector pointer is literally null, so falling through
    // with vectorGPR == nullptr already yields a zero byte offset.
    ASSERT(!JSArrayBufferView::nullVectorPtr());
    nullVector.link(&m_jit);
#endif

    strictInt32Result(vectorGPR, node);
}
8040
void SpeculativeJIT::compileGetByValOnDirectArguments(Node* node)
{
    // GetByVal on a DirectArguments object. Only the plain (non-mapped) case
    // is handled inline; mapped arguments OSR-exit with ExoticObjectMode.
    SpeculateCellOperand base(this, m_graph.varArgChild(node, 0));
    SpeculateStrictInt32Operand property(this, m_graph.varArgChild(node, 1));
    JSValueRegsTemporary result(this);
    GPRTemporary scratch(this);

    GPRReg baseReg = base.gpr();
    GPRReg propertyReg = property.gpr();
    JSValueRegs resultRegs = result.regs();
    GPRReg scratchReg = scratch.gpr();

    if (!m_compileOkay)
        return;

    // OSR-exit if the arguments object has mapped arguments.
    speculationCheck(
        ExoticObjectMode, JSValueSource(), nullptr,
        m_jit.branchTestPtr(
            MacroAssembler::NonZero,
            MacroAssembler::Address(baseReg, DirectArguments::offsetOfMappedArguments())));

    // Bounds check against the stored length.
    m_jit.load32(CCallHelpers::Address(baseReg, DirectArguments::offsetOfLength()), scratchReg);
    auto isOutOfBounds = m_jit.branch32(CCallHelpers::AboveOrEqual, propertyReg, scratchReg);
    if (node->arrayMode().isInBounds())
        speculationCheck(OutOfBounds, JSValueSource(), nullptr, isOutOfBounds);

    // In-bounds fast path: load straight out of the inline storage.
    m_jit.loadValue(
        MacroAssembler::BaseIndex(
            baseReg, propertyReg, MacroAssembler::TimesEight, DirectArguments::storageOffset()),
        resultRegs);

    // When out-of-bounds access is allowed by the array mode, handle it with
    // a generic slow-path call instead of an exit.
    if (!node->arrayMode().isInBounds()) {
        addSlowPathGenerator(
            slowPathCall(
                isOutOfBounds, this, operationGetByValObjectInt,
                extractResult(resultRegs), TrustedImmPtr::weakPointer(m_graph, m_graph.globalObjectFor(node->origin.semantic)), baseReg, propertyReg));
    }

    jsValueResult(resultRegs, node);
}
8081
8082void SpeculativeJIT::compileGetByValOnScopedArguments(Node* node)
8083{
8084 SpeculateCellOperand base(this, m_graph.varArgChild(node, 0));
8085 SpeculateStrictInt32Operand property(this, m_graph.varArgChild(node, 1));
8086 JSValueRegsTemporary result(this);
8087 GPRTemporary scratch(this);
8088 GPRTemporary scratch2(this);
8089
8090 GPRReg baseReg = base.gpr();
8091 GPRReg propertyReg = property.gpr();
8092 JSValueRegs resultRegs = result.regs();
8093 GPRReg scratchReg = scratch.gpr();
8094 GPRReg scratch2Reg = scratch2.gpr();
8095
8096 if (!m_compileOkay)
8097 return;
8098
8099 m_jit.loadPtr(
8100 MacroAssembler::Address(baseReg, ScopedArguments::offsetOfStorage()), resultRegs.payloadGPR());
8101
8102 speculationCheck(
8103 ExoticObjectMode, JSValueSource(), nullptr,
8104 m_jit.branch32(
8105 MacroAssembler::AboveOrEqual, propertyReg,
8106 MacroAssembler::Address(baseReg, ScopedArguments::offsetOfTotalLength())));
8107
8108 m_jit.loadPtr(MacroAssembler::Address(baseReg, ScopedArguments::offsetOfTable()), scratchReg);
8109 m_jit.load32(
8110 MacroAssembler::Address(scratchReg, ScopedArgumentsTable::offsetOfLength()), scratch2Reg);
8111
8112 MacroAssembler::Jump overflowArgument = m_jit.branch32(
8113 MacroAssembler::AboveOrEqual, propertyReg, scratch2Reg);
8114
8115 m_jit.loadPtr(MacroAssembler::Address(baseReg, ScopedArguments::offsetOfScope()), scratch2Reg);
8116
8117 m_jit.loadPtr(
8118 MacroAssembler::Address(scratchReg, ScopedArgumentsTable::offsetOfArguments()),
8119 scratchReg);
8120 m_jit.load32(
8121 MacroAssembler::BaseIndex(scratchReg, propertyReg, MacroAssembler::TimesFour),
8122 scratchReg);
8123
8124 speculationCheck(
8125 ExoticObjectMode, JSValueSource(), nullptr,
8126 m_jit.branch32(
8127 MacroAssembler::Equal, scratchReg, TrustedImm32(ScopeOffset::invalidOffset)));
8128
8129 m_jit.loadValue(
8130 MacroAssembler::BaseIndex(
8131 scratch2Reg, propertyReg, MacroAssembler::TimesEight,
8132 JSLexicalEnvironment::offsetOfVariables()),
8133 resultRegs);
8134
8135 MacroAssembler::Jump done = m_jit.jump();
8136 overflowArgument.link(&m_jit);
8137
8138 m_jit.sub32(propertyReg, scratch2Reg);
8139 m_jit.neg32(scratch2Reg);
8140
8141 m_jit.loadValue(
8142 MacroAssembler::BaseIndex(
8143 resultRegs.payloadGPR(), scratch2Reg, MacroAssembler::TimesEight),
8144 resultRegs);
8145 speculationCheck(ExoticObjectMode, JSValueSource(), nullptr, m_jit.branchIfEmpty(resultRegs));
8146
8147 done.link(&m_jit);
8148
8149 jsValueResult(resultRegs, node);
8150}
8151
8152void SpeculativeJIT::compileGetScope(Node* node)
8153{
8154 SpeculateCellOperand function(this, node->child1());
8155 GPRTemporary result(this, Reuse, function);
8156 m_jit.loadPtr(JITCompiler::Address(function.gpr(), JSFunction::offsetOfScopeChain()), result.gpr());
8157 cellResult(result.gpr(), node);
8158}
8159
8160void SpeculativeJIT::compileSkipScope(Node* node)
8161{
8162 SpeculateCellOperand scope(this, node->child1());
8163 GPRTemporary result(this, Reuse, scope);
8164 m_jit.loadPtr(JITCompiler::Address(scope.gpr(), JSScope::offsetOfNext()), result.gpr());
8165 cellResult(result.gpr(), node);
8166}
8167
8168void SpeculativeJIT::compileGetGlobalObject(Node* node)
8169{
8170 SpeculateCellOperand object(this, node->child1());
8171 GPRTemporary result(this);
8172 GPRTemporary scratch(this);
8173 m_jit.emitLoadStructure(vm(), object.gpr(), result.gpr(), scratch.gpr());
8174 m_jit.loadPtr(JITCompiler::Address(result.gpr(), Structure::globalObjectOffset()), result.gpr());
8175 cellResult(result.gpr(), node);
8176}
8177
8178void SpeculativeJIT::compileGetGlobalThis(Node* node)
8179{
8180 GPRTemporary result(this);
8181 GPRReg resultGPR = result.gpr();
8182 auto* globalObject = m_jit.graph().globalObjectFor(node->origin.semantic);
8183 m_jit.loadPtr(globalObject->addressOfGlobalThis(), resultGPR);
8184 cellResult(resultGPR, node);
8185}
8186
8187bool SpeculativeJIT::canBeRope(Edge& edge)
8188{
8189 if (m_state.forNode(edge).isType(SpecStringIdent))
8190 return false;
8191 // If this value is LazyValue, it will be converted to JSString, and the result must be non-rope string.
8192 String string = edge->tryGetString(m_graph);
8193 if (!string.isNull())
8194 return false;
8195 return true;
8196}
8197
void SpeculativeJIT::compileGetArrayLength(Node* node)
{
    // Emits the length load appropriate to the array mode: butterfly public
    // length, string length, arguments-object length, or typed-array length.
    switch (node->arrayMode().type()) {
    case Array::Undecided:
    case Array::Int32:
    case Array::Double:
    case Array::Contiguous: {
        // Contiguous shapes: length is the butterfly's public length.
        StorageOperand storage(this, node->child2());
        GPRTemporary result(this, Reuse, storage);
        GPRReg storageReg = storage.gpr();
        GPRReg resultReg = result.gpr();
        m_jit.load32(MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength()), resultReg);

        strictInt32Result(resultReg, node);
        break;
    }
    case Array::ArrayStorage:
    case Array::SlowPutArrayStorage: {
        StorageOperand storage(this, node->child2());
        GPRTemporary result(this, Reuse, storage);
        GPRReg storageReg = storage.gpr();
        GPRReg resultReg = result.gpr();
        m_jit.load32(MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength()), resultReg);

        // OSR-exit unless the loaded length is a non-negative int32.
        speculationCheck(Uncountable, JSValueRegs(), nullptr, m_jit.branch32(MacroAssembler::LessThan, resultReg, MacroAssembler::TrustedImm32(0)));

        strictInt32Result(resultReg, node);
        break;
    }
    case Array::String: {
        SpeculateCellOperand base(this, node->child1());
        GPRTemporary result(this, Reuse, base);
        GPRTemporary temp(this);
        GPRReg baseGPR = base.gpr();
        GPRReg resultGPR = result.gpr();
        GPRReg tempGPR = temp.gpr();

        // Skip emitting the rope branch entirely when the string is proven
        // not to be a rope.
        bool needsRopeCase = canBeRope(node->child1());

        m_jit.loadPtr(MacroAssembler::Address(baseGPR, JSString::offsetOfValue()), tempGPR);
        CCallHelpers::Jump isRope;
        if (needsRopeCase)
            isRope = m_jit.branchIfRopeStringImpl(tempGPR);
        // Flat string: the length lives on the StringImpl.
        m_jit.load32(MacroAssembler::Address(tempGPR, StringImpl::lengthMemoryOffset()), resultGPR);
        if (needsRopeCase) {
            auto done = m_jit.jump();

            // Rope: the length is stored on the JSRopeString itself.
            isRope.link(&m_jit);
            m_jit.load32(CCallHelpers::Address(baseGPR, JSRopeString::offsetOfLength()), resultGPR);

            done.link(&m_jit);
        }
        strictInt32Result(resultGPR, node);
        break;
    }
    case Array::DirectArguments: {
        SpeculateCellOperand base(this, node->child1());
        GPRTemporary result(this, Reuse, base);

        GPRReg baseReg = base.gpr();
        GPRReg resultReg = result.gpr();

        if (!m_compileOkay)
            return;

        // OSR-exit if the arguments object has mapped arguments.
        speculationCheck(
            ExoticObjectMode, JSValueSource(), nullptr,
            m_jit.branchTestPtr(
                MacroAssembler::NonZero,
                MacroAssembler::Address(baseReg, DirectArguments::offsetOfMappedArguments())));

        m_jit.load32(
            MacroAssembler::Address(baseReg, DirectArguments::offsetOfLength()), resultReg);

        strictInt32Result(resultReg, node);
        break;
    }
    case Array::ScopedArguments: {
        SpeculateCellOperand base(this, node->child1());
        GPRTemporary result(this, Reuse, base);

        GPRReg baseReg = base.gpr();
        GPRReg resultReg = result.gpr();

        if (!m_compileOkay)
            return;

        // OSR-exit if the object's special properties were overridden
        // ("overrodeThings" flag).
        speculationCheck(
            ExoticObjectMode, JSValueSource(), nullptr,
            m_jit.branchTest8(
                MacroAssembler::NonZero,
                MacroAssembler::Address(baseReg, ScopedArguments::offsetOfOverrodeThings())));

        m_jit.load32(
            MacroAssembler::Address(baseReg, ScopedArguments::offsetOfTotalLength()), resultReg);

        strictInt32Result(resultReg, node);
        break;
    }
    default: {
        // Everything else must be a typed array view; length is stored on it.
        ASSERT(node->arrayMode().isSomeTypedArrayView());
        SpeculateCellOperand base(this, node->child1());
        GPRTemporary result(this, Reuse, base);
        GPRReg baseGPR = base.gpr();
        GPRReg resultGPR = result.gpr();
        m_jit.load32(MacroAssembler::Address(baseGPR, JSArrayBufferView::offsetOfLength()), resultGPR);
        strictInt32Result(resultGPR, node);
        break;
    } }
}
8308
void SpeculativeJIT::compileCheckIdent(Node* node)
{
    // Verifies that the child — a string (StringIdentUse) or symbol
    // (SymbolUse) — has exactly the expected UniquedStringImpl, OSR-exiting
    // with BadIdent otherwise. Produces no result.
    SpeculateCellOperand stringOrSymbol(this, node->child1());
    GPRTemporary impl(this);
    GPRReg stringOrSymbolGPR = stringOrSymbol.gpr();
    GPRReg implGPR = impl.gpr();

    if (node->child1().useKind() == StringIdentUse) {
        // Speculate string-ness, then ident-ness, loading the StringImpl*.
        speculateString(node->child1(), stringOrSymbolGPR);
        speculateStringIdentAndLoadStorage(node->child1(), stringOrSymbolGPR, implGPR);
    } else {
        ASSERT(node->child1().useKind() == SymbolUse);
        // Symbols carry their impl directly; just load it.
        speculateSymbol(node->child1(), stringOrSymbolGPR);
        m_jit.loadPtr(MacroAssembler::Address(stringOrSymbolGPR, Symbol::offsetOfSymbolImpl()), implGPR);
    }

    // Uniqued impls are compared by pointer identity.
    UniquedStringImpl* uid = node->uidOperand();
    speculationCheck(
        BadIdent, JSValueSource(), nullptr,
        m_jit.branchPtr(JITCompiler::NotEqual, implGPR, TrustedImmPtr(uid)));
    noResult(node);
}
8331
// Shared fast path for the NewFunction-family nodes: inline-allocates a
// function object of ClassType (branching to slowPath on allocation failure)
// and initializes its scope chain and executable. Function objects need no
// butterfly, so a null butterfly is passed to the allocator.
template <typename ClassType>
void SpeculativeJIT::compileNewFunctionCommon(GPRReg resultGPR, RegisteredStructure structure, GPRReg scratch1GPR, GPRReg scratch2GPR, GPRReg scopeGPR, MacroAssembler::JumpList& slowPath, size_t size, FunctionExecutable* executable)
{
    auto butterfly = TrustedImmPtr(nullptr);
    emitAllocateJSObjectWithKnownSize<ClassType>(resultGPR, TrustedImmPtr(structure), butterfly, scratch1GPR, scratch2GPR, slowPath, size);

    m_jit.storePtr(scopeGPR, JITCompiler::Address(resultGPR, JSFunction::offsetOfScopeChain()));
    m_jit.storePtr(TrustedImmPtr::weakPointer(m_jit.graph(), executable), JITCompiler::Address(resultGPR, JSFunction::offsetOfExecutableOrRareData()));
    // Fence after the initializing stores, before the new object can escape.
    m_jit.mutatorFence(vm());
}
8342
void SpeculativeJIT::compileNewFunction(Node* node)
{
    // Creates a JSFunction (or generator/async/async-generator variant) with
    // the given scope. Takes the runtime path while the executable's
    // singleton is still valid; otherwise inline-allocates with a slow-path
    // fallback.
    NodeType nodeType = node->op();
    ASSERT(nodeType == NewFunction || nodeType == NewGeneratorFunction || nodeType == NewAsyncFunction || nodeType == NewAsyncGeneratorFunction);

    SpeculateCellOperand scope(this, node->child1());
    GPRReg scopeGPR = scope.gpr();

    FunctionExecutable* executable = node->castOperand<FunctionExecutable*>();

    // Runtime path: call the operation matching the node type. These can
    // throw, hence the exception check.
    if (executable->singleton().isStillValid()) {
        GPRFlushedCallResult result(this);
        GPRReg resultGPR = result.gpr();

        flushRegisters();

        if (nodeType == NewGeneratorFunction)
            callOperation(operationNewGeneratorFunction, resultGPR, &vm(), scopeGPR, executable);
        else if (nodeType == NewAsyncFunction)
            callOperation(operationNewAsyncFunction, resultGPR, &vm(), scopeGPR, executable);
        else if (nodeType == NewAsyncGeneratorFunction)
            callOperation(operationNewAsyncGeneratorFunction, resultGPR, &vm(), scopeGPR, executable);
        else
            callOperation(operationNewFunction, resultGPR, &vm(), scopeGPR, executable);
        m_jit.exceptionCheck();
        cellResult(resultGPR, node);
        return;
    }

    // Pick the structure for the function object from the node's global object.
    RegisteredStructure structure = m_jit.graph().registerStructure(
        [&] () {
            JSGlobalObject* globalObject = m_jit.graph().globalObjectFor(node->origin.semantic);
            switch (nodeType) {
            case NewGeneratorFunction:
                return globalObject->generatorFunctionStructure();
            case NewAsyncFunction:
                return globalObject->asyncFunctionStructure();
            case NewAsyncGeneratorFunction:
                return globalObject->asyncGeneratorFunctionStructure();
            case NewFunction:
                return JSFunction::selectStructureForNewFuncExp(globalObject, node->castOperand<FunctionExecutable*>());
            default:
                RELEASE_ASSERT_NOT_REACHED();
            }
        }());

    GPRTemporary result(this);
    GPRTemporary scratch1(this);
    GPRTemporary scratch2(this);

    GPRReg resultGPR = result.gpr();
    GPRReg scratch1GPR = scratch1.gpr();
    GPRReg scratch2GPR = scratch2.gpr();

    JITCompiler::JumpList slowPath;

    // Fast inline allocation per node type; allocation failure takes the
    // matching "WithInvalidatedReallocationWatchpoint" runtime operation.
    if (nodeType == NewFunction) {
        compileNewFunctionCommon<JSFunction>(resultGPR, structure, scratch1GPR, scratch2GPR, scopeGPR, slowPath, JSFunction::allocationSize(0), executable);

        addSlowPathGenerator(slowPathCall(slowPath, this, operationNewFunctionWithInvalidatedReallocationWatchpoint, resultGPR, &vm(), scopeGPR, executable));
    }

    if (nodeType == NewGeneratorFunction) {
        compileNewFunctionCommon<JSGeneratorFunction>(resultGPR, structure, scratch1GPR, scratch2GPR, scopeGPR, slowPath, JSGeneratorFunction::allocationSize(0), executable);

        addSlowPathGenerator(slowPathCall(slowPath, this, operationNewGeneratorFunctionWithInvalidatedReallocationWatchpoint, resultGPR, &vm(), scopeGPR, executable));
    }

    if (nodeType == NewAsyncFunction) {
        compileNewFunctionCommon<JSAsyncFunction>(resultGPR, structure, scratch1GPR, scratch2GPR, scopeGPR, slowPath, JSAsyncFunction::allocationSize(0), executable);

        addSlowPathGenerator(slowPathCall(slowPath, this, operationNewAsyncFunctionWithInvalidatedReallocationWatchpoint, resultGPR, &vm(), scopeGPR, executable));
    }

    if (nodeType == NewAsyncGeneratorFunction) {
        compileNewFunctionCommon<JSAsyncGeneratorFunction>(resultGPR, structure, scratch1GPR, scratch2GPR, scopeGPR, slowPath, JSAsyncGeneratorFunction::allocationSize(0), executable);

        addSlowPathGenerator(slowPathCall(slowPath, this, operationNewAsyncGeneratorFunctionWithInvalidatedReallocationWatchpoint, resultGPR, &vm(), scopeGPR, executable));
    }

    cellResult(resultGPR, node);
}
8425
8426void SpeculativeJIT::compileSetFunctionName(Node* node)
8427{
8428 SpeculateCellOperand func(this, node->child1());
8429 GPRReg funcGPR = func.gpr();
8430 JSValueOperand nameValue(this, node->child2());
8431 JSValueRegs nameValueRegs = nameValue.jsValueRegs();
8432
8433 flushRegisters();
8434 callOperation(operationSetFunctionName, TrustedImmPtr::weakPointer(m_graph, m_graph.globalObjectFor(node->origin.semantic)), funcGPR, nameValueRegs);
8435 m_jit.exceptionCheck();
8436
8437 noResult(node);
8438}
8439
void SpeculativeJIT::compileVarargsLength(Node* node)
{
    // Computes the argument count (including 'this') of a varargs call by
    // asking the runtime for the size of the spread arguments and adding 1.
    LoadVarargsData* data = node->loadVarargsData();

    // Lock returnValueGPR while materializing the arguments operand so it is
    // not allocated into the register the call result will occupy.
    JSValueRegs argumentsRegs;
    lock(GPRInfo::returnValueGPR);
    JSValueOperand arguments(this, node->argumentsChild());
    argumentsRegs = arguments.jsValueRegs();
    flushRegisters();
    unlock(GPRInfo::returnValueGPR);

    callOperation(operationSizeOfVarargs, GPRInfo::returnValueGPR, TrustedImmPtr::weakPointer(m_graph, m_graph.globalObjectFor(node->origin.semantic)), argumentsRegs, data->offset);
    m_jit.exceptionCheck();

    // Likewise keep the fresh temporary away from returnValueGPR, which still
    // holds the call's result at this point.
    lock(GPRInfo::returnValueGPR);
    GPRTemporary argCountIncludingThis(this);
    GPRReg argCountIncludingThisGPR = argCountIncludingThis.gpr();
    unlock(GPRInfo::returnValueGPR);

    // +1 to account for 'this'.
    m_jit.add32(TrustedImm32(1), GPRInfo::returnValueGPR, argCountIncludingThisGPR);

    strictInt32Result(argCountIncludingThisGPR, node);
}
8463
void SpeculativeJIT::compileLoadVarargs(Node* node)
{
    // Copies a varargs argument list into the machine frame slots reserved
    // for the call. child1 is the argument count including 'this' (as
    // produced by VarargsLength).
    LoadVarargsData* data = node->loadVarargsData();

    SpeculateStrictInt32Operand argumentCount(this, node->child1());
    JSValueOperand arguments(this, node->argumentsChild());
    GPRReg argumentCountIncludingThis = argumentCount.gpr();
    JSValueRegs argumentsRegs = arguments.jsValueRegs();

    // A zero count (which should never occur for a count that includes
    // 'this') is treated as overflow and OSR-exits.
    speculationCheck(
        VarargsOverflow, JSValueSource(), Edge(), m_jit.branchTest32(
            MacroAssembler::Zero,
            argumentCountIncludingThis));

    // OSR-exit if the arguments would not fit in the reserved frame area.
    speculationCheck(
        VarargsOverflow, JSValueSource(), Edge(), m_jit.branch32(
            MacroAssembler::Above,
            argumentCountIncludingThis,
            TrustedImm32(data->limit)));

    flushRegisters();

    // Publish the count, then let the runtime copy the actual values.
    m_jit.store32(argumentCountIncludingThis, JITCompiler::payloadFor(data->machineCount));

    callOperation(operationLoadVarargs, TrustedImmPtr::weakPointer(m_graph, m_graph.globalObjectFor(node->origin.semantic)), data->machineStart.offset(), argumentsRegs, data->offset, argumentCountIncludingThis, data->mandatoryMinimum);
    m_jit.exceptionCheck();

    noResult(node);
}
8493
8494void SpeculativeJIT::compileForwardVarargs(Node* node)
8495{
8496 LoadVarargsData* data = node->loadVarargsData();
8497 InlineCallFrame* inlineCallFrame;
8498 if (node->argumentsChild())
8499 inlineCallFrame = node->argumentsChild()->origin.semantic.inlineCallFrame();
8500 else
8501 inlineCallFrame = node->origin.semantic.inlineCallFrame();
8502
8503 SpeculateStrictInt32Operand argumentCount(this, node->child1());
8504 GPRTemporary length(this);
8505 JSValueRegsTemporary temp(this);
8506 GPRReg argumentCountIncludingThis = argumentCount.gpr();
8507 GPRReg lengthGPR = argumentCount.gpr();
8508 JSValueRegs tempRegs = temp.regs();
8509
8510 m_jit.move(argumentCountIncludingThis, lengthGPR);
8511 if (data->offset)
8512 m_jit.sub32(TrustedImm32(data->offset), lengthGPR);
8513
8514 speculationCheck(
8515 VarargsOverflow, JSValueSource(), Edge(), m_jit.branch32(
8516 MacroAssembler::Above,
8517 lengthGPR, TrustedImm32(data->limit)));
8518
8519 m_jit.store32(lengthGPR, JITCompiler::payloadFor(data->machineCount));
8520
8521 VirtualRegister sourceStart = JITCompiler::argumentsStart(inlineCallFrame) + data->offset;
8522 VirtualRegister targetStart = data->machineStart;
8523
8524 m_jit.sub32(TrustedImm32(1), lengthGPR);
8525
8526 // First have a loop that fills in the undefined slots in case of an arity check failure.
8527 m_jit.move(TrustedImm32(data->mandatoryMinimum), tempRegs.payloadGPR());
8528 JITCompiler::Jump done = m_jit.branch32(JITCompiler::BelowOrEqual, tempRegs.payloadGPR(), lengthGPR);
8529
8530 JITCompiler::Label loop = m_jit.label();
8531 m_jit.sub32(TrustedImm32(1), tempRegs.payloadGPR());
8532 m_jit.storeTrustedValue(
8533 jsUndefined(),
8534 JITCompiler::BaseIndex(
8535 GPRInfo::callFrameRegister, tempRegs.payloadGPR(), JITCompiler::TimesEight,
8536 targetStart.offset() * sizeof(EncodedJSValue)));
8537 m_jit.branch32(JITCompiler::Above, tempRegs.payloadGPR(), lengthGPR).linkTo(loop, &m_jit);
8538 done.link(&m_jit);
8539
8540 // And then fill in the actual argument values.
8541 done = m_jit.branchTest32(JITCompiler::Zero, lengthGPR);
8542
8543 loop = m_jit.label();
8544 m_jit.sub32(TrustedImm32(1), lengthGPR);
8545 m_jit.loadValue(
8546 JITCompiler::BaseIndex(
8547 GPRInfo::callFrameRegister, lengthGPR, JITCompiler::TimesEight,
8548 sourceStart.offset() * sizeof(EncodedJSValue)),
8549 tempRegs);
8550 m_jit.storeValue(
8551 tempRegs,
8552 JITCompiler::BaseIndex(
8553 GPRInfo::callFrameRegister, lengthGPR, JITCompiler::TimesEight,
8554 targetStart.offset() * sizeof(EncodedJSValue)));
8555 m_jit.branchTest32(JITCompiler::NonZero, lengthGPR).linkTo(loop, &m_jit);
8556
8557 done.link(&m_jit);
8558
8559 noResult(node);
8560}
8561
void SpeculativeJIT::compileCreateActivation(Node* node)
{
    // Creates a JSLexicalEnvironment (an "activation") whose parent scope is the
    // cell in child1. Every variable slot is initialized to either undefined or
    // the TDZ empty value, as requested by the node.
    SymbolTable* table = node->castOperand<SymbolTable*>();
    RegisteredStructure structure = m_jit.graph().registerStructure(m_jit.graph().globalObjectFor(
        node->origin.semantic)->activationStructure());

    SpeculateCellOperand scope(this, node->child1());
    GPRReg scopeGPR = scope.gpr();
    JSValue initializationValue = node->initializationValueForActivation();
    ASSERT(initializationValue == jsUndefined() || initializationValue == jsTDZValue());

    // While the symbol table's singleton watchpoint is still valid we always go
    // through the runtime (presumably so the singleton/watchpoint state observes
    // the new object — the inline path below does no such bookkeeping).
    if (table->singleton().isStillValid()) {
        GPRFlushedCallResult result(this);
        GPRReg resultGPR = result.gpr();

#if USE(JSVALUE32_64)
        // On 32-bit the initialization value cannot be passed as one immediate,
        // so materialize it into a register pair before flushing.
        JSValueRegsTemporary initialization(this);
        JSValueRegs initializationRegs = initialization.regs();
        m_jit.moveTrustedValue(initializationValue, initializationRegs);
#endif

        flushRegisters();

#if USE(JSVALUE64)
        callOperation(operationCreateActivationDirect,
            resultGPR, &vm(), structure, scopeGPR, table, TrustedImm64(JSValue::encode(initializationValue)));
#else
        callOperation(operationCreateActivationDirect,
            resultGPR, &vm(), structure, scopeGPR, table, initializationRegs);
#endif
        m_jit.exceptionCheck();
        cellResult(resultGPR, node);
        return;
    }

    GPRTemporary result(this);
    GPRTemporary scratch1(this);
    GPRTemporary scratch2(this);
    GPRReg resultGPR = result.gpr();
    GPRReg scratch1GPR = scratch1.gpr();
    GPRReg scratch2GPR = scratch2.gpr();

#if USE(JSVALUE32_64)
    JSValueRegsTemporary initialization(this);
    JSValueRegs initializationRegs = initialization.regs();
    m_jit.moveTrustedValue(initializationValue, initializationRegs);
#endif

    // Fast path: inline-allocate the activation. Activations carry no butterfly.
    JITCompiler::JumpList slowPath;
    auto butterfly = TrustedImmPtr(nullptr);
    emitAllocateJSObjectWithKnownSize<JSLexicalEnvironment>(
        resultGPR, TrustedImmPtr(structure), butterfly, scratch1GPR, scratch2GPR,
        slowPath, JSLexicalEnvironment::allocationSize(table));

    // We don't need a memory barrier since we just fast-created the activation,
    // so the activation must be young.
    m_jit.storePtr(scopeGPR, JITCompiler::Address(resultGPR, JSScope::offsetOfNext()));
    m_jit.storePtr(
        TrustedImmPtr(node->cellOperand()),
        JITCompiler::Address(resultGPR, JSLexicalEnvironment::offsetOfSymbolTable()));

    // Must initialize all members to undefined or the TDZ empty value.
    for (unsigned i = 0; i < table->scopeSize(); ++i) {
        m_jit.storeTrustedValue(
            initializationValue,
            JITCompiler::Address(
                resultGPR, JSLexicalEnvironment::offsetOfVariable(ScopeOffset(i))));
    }

    // Fence so the fully-initialized object is safe to expose.
    m_jit.mutatorFence(vm());

#if USE(JSVALUE64)
    addSlowPathGenerator(
        slowPathCall(
            slowPath, this, operationCreateActivationDirect, resultGPR, &vm(), structure, scopeGPR, table, TrustedImm64(JSValue::encode(initializationValue))));
#else
    addSlowPathGenerator(
        slowPathCall(
            slowPath, this, operationCreateActivationDirect, resultGPR, &vm(), structure, scopeGPR, table, initializationRegs));
#endif

    cellResult(resultGPR, node);
}
8645
void SpeculativeJIT::compileCreateDirectArguments(Node* node)
{
    // Materializes a DirectArguments object for the current (possibly inlined)
    // call frame: allocate it (inline when possible), fill in the length,
    // callee and bookkeeping fields, then copy the argument values out of the
    // frame into the object's slots.

    // FIXME: A more effective way of dealing with the argument count and callee is to have
    // them be explicit arguments to this node.
    // https://bugs.webkit.org/show_bug.cgi?id=142207

    GPRTemporary result(this);
    GPRTemporary scratch1(this);
    GPRTemporary scratch2(this);
    GPRTemporary length;
    GPRReg resultGPR = result.gpr();
    GPRReg scratch1GPR = scratch1.gpr();
    GPRReg scratch2GPR = scratch2.gpr();
    GPRReg lengthGPR = InvalidGPRReg;
    JSValueRegs valueRegs = JSValueRegs::withTwoAvailableRegs(scratch1GPR, scratch2GPR);

    // The object needs room for at least the declared parameters; the -1 drops
    // |this| (mirroring argumentCountIncludingThis - 1 below).
    unsigned minCapacity = m_jit.graph().baselineCodeBlockFor(node->origin.semantic)->numParameters() - 1;

    unsigned knownLength;
    bool lengthIsKnown; // if false, lengthGPR will have the length.
    auto* inlineCallFrame = node->origin.semantic.inlineCallFrame();
    if (inlineCallFrame
        && !inlineCallFrame->isVarargs()) {
        knownLength = static_cast<unsigned>(inlineCallFrame->argumentCountIncludingThis - 1);
        lengthIsKnown = true;
    } else {
        knownLength = UINT_MAX;
        lengthIsKnown = false;

        GPRTemporary realLength(this);
        length.adopt(realLength);
        lengthGPR = length.gpr();

        // Load the dynamic argument count from the frame and drop |this|.
        VirtualRegister argumentCountRegister = m_jit.argumentCount(node->origin.semantic);
        m_jit.load32(JITCompiler::payloadFor(argumentCountRegister), lengthGPR);
        m_jit.sub32(TrustedImm32(1), lengthGPR);
    }

    RegisteredStructure structure =
        m_jit.graph().registerStructure(m_jit.graph().globalObjectFor(node->origin.semantic)->directArgumentsStructure());

    // Use a different strategy for allocating the object depending on whether we know its
    // size statically.
    JITCompiler::JumpList slowPath;
    if (lengthIsKnown) {
        auto butterfly = TrustedImmPtr(nullptr);
        emitAllocateJSObjectWithKnownSize<DirectArguments>(
            resultGPR, TrustedImmPtr(structure), butterfly, scratch1GPR, scratch2GPR,
            slowPath, DirectArguments::allocationSize(std::max(knownLength, minCapacity)));

        m_jit.store32(
            TrustedImm32(knownLength),
            JITCompiler::Address(resultGPR, DirectArguments::offsetOfLength()));
    } else {
        // Compute the allocation size in scratch1: storageOffset + 8 * length,
        // clamped up to allocationSize(minCapacity) when too few arguments were
        // actually passed.
        JITCompiler::Jump tooFewArguments;
        if (minCapacity) {
            tooFewArguments =
                m_jit.branch32(JITCompiler::Below, lengthGPR, TrustedImm32(minCapacity));
        }
        m_jit.lshift32(lengthGPR, TrustedImm32(3), scratch1GPR);
        m_jit.add32(TrustedImm32(DirectArguments::storageOffset()), scratch1GPR);
        if (minCapacity) {
            JITCompiler::Jump done = m_jit.jump();
            tooFewArguments.link(&m_jit);
            m_jit.move(TrustedImm32(DirectArguments::allocationSize(minCapacity)), scratch1GPR);
            done.link(&m_jit);
        }

        emitAllocateVariableSizedJSObject<DirectArguments>(
            resultGPR, TrustedImmPtr(structure), scratch1GPR, scratch1GPR, scratch2GPR,
            slowPath);

        m_jit.store32(
            lengthGPR, JITCompiler::Address(resultGPR, DirectArguments::offsetOfLength()));
    }

    m_jit.store32(
        TrustedImm32(minCapacity),
        JITCompiler::Address(resultGPR, DirectArguments::offsetOfMinCapacity()));

    // Fresh arguments objects start out unmapped and unmodified.
    m_jit.storePtr(
        TrustedImmPtr(nullptr), JITCompiler::Address(resultGPR, DirectArguments::offsetOfMappedArguments()));

    m_jit.storePtr(
        TrustedImmPtr(nullptr), JITCompiler::Address(resultGPR, DirectArguments::offsetOfModifiedArgumentsDescriptor()));

    if (lengthIsKnown) {
        addSlowPathGenerator(
            slowPathCall(
                slowPath, this, operationCreateDirectArguments, resultGPR, &vm(), structure,
                knownLength, minCapacity));
    } else {
        auto generator = makeUnique<CallCreateDirectArgumentsSlowPathGenerator>(
            slowPath, this, resultGPR, structure, lengthGPR, minCapacity);
        addSlowPathGenerator(WTFMove(generator));
    }

    // Fetch the callee: for closure inline call frames it lives in the frame;
    // otherwise it is a compile-time constant.
    if (inlineCallFrame) {
        if (inlineCallFrame->isClosureCall) {
            m_jit.loadPtr(
                JITCompiler::addressFor(
                    inlineCallFrame->calleeRecovery.virtualRegister()),
                scratch1GPR);
        } else {
            m_jit.move(
                TrustedImmPtr::weakPointer(
                    m_jit.graph(), inlineCallFrame->calleeRecovery.constant().asCell()),
                scratch1GPR);
        }
    } else
        m_jit.loadPtr(JITCompiler::addressFor(CallFrameSlot::callee), scratch1GPR);

    // We don't need a memory barrier since we just fast-created the arguments
    // object, so it must be young. (The original comment said "activation";
    // that was a copy/paste from compileCreateActivation.)
    m_jit.storePtr(
        scratch1GPR, JITCompiler::Address(resultGPR, DirectArguments::offsetOfCallee()));

    VirtualRegister start = m_jit.argumentsStart(node->origin.semantic);
    if (lengthIsKnown) {
        // Statically-known length: unroll the copy.
        for (unsigned i = 0; i < std::max(knownLength, minCapacity); ++i) {
            m_jit.loadValue(JITCompiler::addressFor(start + i), valueRegs);
            m_jit.storeValue(
                valueRegs, JITCompiler::Address(resultGPR, DirectArguments::offsetOfSlot(i)));
        }
    } else {
        // Dynamic length: copy backwards from index length-1 down to 0, first
        // raising the count to minCapacity so at least that many slots copy.
        JITCompiler::Jump done;
        if (minCapacity) {
            JITCompiler::Jump startLoop = m_jit.branch32(
                JITCompiler::AboveOrEqual, lengthGPR, TrustedImm32(minCapacity));
            m_jit.move(TrustedImm32(minCapacity), lengthGPR);
            startLoop.link(&m_jit);
        } else
            done = m_jit.branchTest32(MacroAssembler::Zero, lengthGPR);
        JITCompiler::Label loop = m_jit.label();
        m_jit.sub32(TrustedImm32(1), lengthGPR);
        m_jit.loadValue(
            JITCompiler::BaseIndex(
                GPRInfo::callFrameRegister, lengthGPR, JITCompiler::TimesEight,
                start.offset() * static_cast<int>(sizeof(Register))),
            valueRegs);
        m_jit.storeValue(
            valueRegs,
            JITCompiler::BaseIndex(
                resultGPR, lengthGPR, JITCompiler::TimesEight,
                DirectArguments::storageOffset()));
        m_jit.branchTest32(MacroAssembler::NonZero, lengthGPR).linkTo(loop, &m_jit);
        if (done.isSet())
            done.link(&m_jit);
    }

    m_jit.mutatorFence(vm());

    cellResult(resultGPR, node);
}
8800
8801void SpeculativeJIT::compileGetFromArguments(Node* node)
8802{
8803 SpeculateCellOperand arguments(this, node->child1());
8804 JSValueRegsTemporary result(this);
8805
8806 GPRReg argumentsGPR = arguments.gpr();
8807 JSValueRegs resultRegs = result.regs();
8808
8809 m_jit.loadValue(JITCompiler::Address(argumentsGPR, DirectArguments::offsetOfSlot(node->capturedArgumentsOffset().offset())), resultRegs);
8810 jsValueResult(resultRegs, node);
8811}
8812
8813void SpeculativeJIT::compilePutToArguments(Node* node)
8814{
8815 SpeculateCellOperand arguments(this, node->child1());
8816 JSValueOperand value(this, node->child2());
8817
8818 GPRReg argumentsGPR = arguments.gpr();
8819 JSValueRegs valueRegs = value.jsValueRegs();
8820
8821 m_jit.storeValue(valueRegs, JITCompiler::Address(argumentsGPR, DirectArguments::offsetOfSlot(node->capturedArgumentsOffset().offset())));
8822 noResult(node);
8823}
8824
8825void SpeculativeJIT::compileGetArgument(Node* node)
8826{
8827 GPRTemporary argumentCount(this);
8828 JSValueRegsTemporary result(this);
8829 GPRReg argumentCountGPR = argumentCount.gpr();
8830 JSValueRegs resultRegs = result.regs();
8831 m_jit.load32(CCallHelpers::payloadFor(m_jit.argumentCount(node->origin.semantic)), argumentCountGPR);
8832 auto argumentOutOfBounds = m_jit.branch32(CCallHelpers::LessThanOrEqual, argumentCountGPR, CCallHelpers::TrustedImm32(node->argumentIndex()));
8833 m_jit.loadValue(CCallHelpers::addressFor(CCallHelpers::argumentsStart(node->origin.semantic) + node->argumentIndex() - 1), resultRegs);
8834 auto done = m_jit.jump();
8835
8836 argumentOutOfBounds.link(&m_jit);
8837 m_jit.moveValue(jsUndefined(), resultRegs);
8838
8839 done.link(&m_jit);
8840 jsValueResult(resultRegs, node);
8841}
8842
void SpeculativeJIT::compileCreateScopedArguments(Node* node)
{
    // Creates a ScopedArguments object by calling into the runtime. The call
    // arguments are marshalled directly into the C argument registers rather
    // than via the usual callOperation() machinery.
    SpeculateCellOperand scope(this, node->child1());
    GPRReg scopeGPR = scope.gpr();

    GPRFlushedCallResult result(this);
    GPRReg resultGPR = result.gpr();
    flushRegisters();

    JSGlobalObject* globalObject = m_jit.globalObjectFor(node->origin.semantic);

    // We set up the arguments ourselves, because we have the whole register file and we can
    // set them up directly into the argument registers. This also means that we don't have to
    // invent a four-argument-register shuffle.

    // Arguments: 0:JSGlobalObject*, 1:structure, 2:start, 3:length, 4:callee, 5:scope

    // Do the scopeGPR first, since it might alias an argument register.
    m_jit.setupArgument(5, [&] (GPRReg destGPR) { m_jit.move(scopeGPR, destGPR); });

    // These other things could be done in any order.
    m_jit.setupArgument(4, [&] (GPRReg destGPR) { emitGetCallee(node->origin.semantic, destGPR); });
    m_jit.setupArgument(3, [&] (GPRReg destGPR) { emitGetLength(node->origin.semantic, destGPR); });
    m_jit.setupArgument(2, [&] (GPRReg destGPR) { emitGetArgumentStart(node->origin.semantic, destGPR); });
    m_jit.setupArgument(
        1, [&] (GPRReg destGPR) {
            m_jit.move(
                TrustedImmPtr::weakPointer(m_jit.graph(), globalObject->scopedArgumentsStructure()),
                destGPR);
        });
    m_jit.setupArgument(0, [&] (GPRReg destGPR) { m_jit.move(TrustedImmPtr::weakPointer(m_graph, globalObject), destGPR); });

    appendCallSetResult(operationCreateScopedArguments, resultGPR);
    m_jit.exceptionCheck();

    cellResult(resultGPR, node);
}
8880
void SpeculativeJIT::compileCreateClonedArguments(Node* node)
{
    // Creates a ClonedArguments object by calling into the runtime. As in
    // compileCreateScopedArguments, the call arguments are marshalled directly
    // into the C argument registers.
    GPRFlushedCallResult result(this);
    GPRReg resultGPR = result.gpr();
    flushRegisters();

    JSGlobalObject* globalObject = m_jit.globalObjectFor(node->origin.semantic);

    // We set up the arguments ourselves, because we have the whole register file and we can
    // set them up directly into the argument registers.

    // Arguments: 0:JSGlobalObject*, 1:structure, 2:start, 3:length, 4:callee
    m_jit.setupArgument(4, [&] (GPRReg destGPR) { emitGetCallee(node->origin.semantic, destGPR); });
    m_jit.setupArgument(3, [&] (GPRReg destGPR) { emitGetLength(node->origin.semantic, destGPR); });
    m_jit.setupArgument(2, [&] (GPRReg destGPR) { emitGetArgumentStart(node->origin.semantic, destGPR); });
    m_jit.setupArgument(
        1, [&] (GPRReg destGPR) {
            m_jit.move(
                TrustedImmPtr::weakPointer(
                    m_jit.graph(), globalObject->clonedArgumentsStructure()),
                destGPR);
        });
    m_jit.setupArgument(0, [&] (GPRReg destGPR) { m_jit.move(TrustedImmPtr::weakPointer(m_graph, globalObject), destGPR); });

    appendCallSetResult(operationCreateClonedArguments, resultGPR);
    m_jit.exceptionCheck();

    cellResult(resultGPR, node);
}
8910
void SpeculativeJIT::compileCreateArgumentsButterfly(Node* node)
{
    // Creates the butterfly backing store for an arguments object by calling
    // into the runtime, passing the frame's argument start and length.
    GPRFlushedCallResult result(this);
    GPRReg resultGPR = result.gpr();
    flushRegisters();

    JSGlobalObject* globalObject = m_jit.globalObjectFor(node->origin.semantic);

    // We set up the arguments ourselves, because we have the whole register file and we can
    // set them up directly into the argument registers.

    // Arguments: 0:JSGlobalObject*, 1:start, 2:length
    m_jit.setupArgument(2, [&] (GPRReg destGPR) { emitGetLength(node->origin.semantic, destGPR); });
    m_jit.setupArgument(1, [&] (GPRReg destGPR) { emitGetArgumentStart(node->origin.semantic, destGPR); });
    m_jit.setupArgument(0, [&] (GPRReg destGPR) { m_jit.move(TrustedImmPtr::weakPointer(m_graph, globalObject), destGPR); });

    appendCallSetResult(operationCreateArgumentsButterfly, resultGPR);
    m_jit.exceptionCheck();

    cellResult(resultGPR, node);
}
8932
8933void SpeculativeJIT::compileCreateRest(Node* node)
8934{
8935 ASSERT(node->op() == CreateRest);
8936
8937 if (m_jit.graph().isWatchingHavingABadTimeWatchpoint(node)) {
8938 SpeculateStrictInt32Operand arrayLength(this, node->child1());
8939 GPRTemporary arrayResult(this);
8940
8941 GPRReg arrayLengthGPR = arrayLength.gpr();
8942 GPRReg arrayResultGPR = arrayResult.gpr();
8943
8944 // We can tell compileAllocateNewArrayWithSize() that it does not need to check
8945 // for large arrays and use ArrayStorage structure because arrayLength here will
8946 // always be bounded by stack size. Realistically, we won't be able to push enough
8947 // arguments to have arrayLength exceed MIN_ARRAY_STORAGE_CONSTRUCTION_LENGTH.
8948 bool shouldAllowForArrayStorageStructureForLargeArrays = false;
8949 compileAllocateNewArrayWithSize(m_jit.graph().globalObjectFor(node->origin.semantic), arrayResultGPR, arrayLengthGPR, ArrayWithContiguous, shouldAllowForArrayStorageStructureForLargeArrays);
8950
8951 GPRTemporary argumentsStart(this);
8952 GPRReg argumentsStartGPR = argumentsStart.gpr();
8953
8954 emitGetArgumentStart(node->origin.semantic, argumentsStartGPR);
8955
8956 GPRTemporary butterfly(this);
8957 GPRTemporary currentLength(this);
8958 JSValueRegsTemporary value(this);
8959
8960 JSValueRegs valueRegs = value.regs();
8961 GPRReg currentLengthGPR = currentLength.gpr();
8962 GPRReg butterflyGPR = butterfly.gpr();
8963
8964 m_jit.loadPtr(MacroAssembler::Address(arrayResultGPR, JSObject::butterflyOffset()), butterflyGPR);
8965
8966 CCallHelpers::Jump skipLoop = m_jit.branch32(MacroAssembler::Equal, arrayLengthGPR, TrustedImm32(0));
8967 m_jit.zeroExtend32ToWord(arrayLengthGPR, currentLengthGPR);
8968 m_jit.addPtr(Imm32(sizeof(Register) * node->numberOfArgumentsToSkip()), argumentsStartGPR);
8969
8970 auto loop = m_jit.label();
8971 m_jit.sub32(TrustedImm32(1), currentLengthGPR);
8972 m_jit.loadValue(JITCompiler::BaseIndex(argumentsStartGPR, currentLengthGPR, MacroAssembler::TimesEight), valueRegs);
8973 m_jit.storeValue(valueRegs, MacroAssembler::BaseIndex(butterflyGPR, currentLengthGPR, MacroAssembler::TimesEight));
8974 m_jit.branch32(MacroAssembler::NotEqual, currentLengthGPR, TrustedImm32(0)).linkTo(loop, &m_jit);
8975
8976 skipLoop.link(&m_jit);
8977 cellResult(arrayResultGPR, node);
8978 return;
8979 }
8980
8981 SpeculateStrictInt32Operand arrayLength(this, node->child1());
8982 GPRTemporary argumentsStart(this);
8983 GPRTemporary numberOfArgumentsToSkip(this);
8984
8985 GPRReg arrayLengthGPR = arrayLength.gpr();
8986 GPRReg argumentsStartGPR = argumentsStart.gpr();
8987
8988 emitGetArgumentStart(node->origin.semantic, argumentsStartGPR);
8989
8990 flushRegisters();
8991
8992 GPRFlushedCallResult result(this);
8993 GPRReg resultGPR = result.gpr();
8994 callOperation(operationCreateRest, resultGPR, TrustedImmPtr::weakPointer(m_graph, m_graph.globalObjectFor(node->origin.semantic)), argumentsStartGPR, Imm32(node->numberOfArgumentsToSkip()), arrayLengthGPR);
8995 m_jit.exceptionCheck();
8996
8997 cellResult(resultGPR, node);
8998}
8999
void SpeculativeJIT::compileSpread(Node* node)
{
    // Implements Spread: converts the array cell in child1 into a
    // JSImmutableButterfly containing its elements. Has an inlined fast path
    // (64-bit only) when the abstract interpreter proves the array is safe to
    // spread directly; otherwise calls into the runtime.
    ASSERT(node->op() == Spread);

    SpeculateCellOperand operand(this, node->child1());
    GPRReg argument = operand.gpr();

    if (node->child1().useKind() == ArrayUse)
        speculateArray(node->child1(), argument);

    if (m_jit.graph().canDoFastSpread(node, m_state.forNode(node->child1()))) {
#if USE(JSVALUE64)
        GPRTemporary result(this);
        GPRTemporary scratch1(this);
        GPRTemporary scratch2(this);
        GPRTemporary length(this);
        FPRTemporary doubleRegister(this);

        GPRReg resultGPR = result.gpr();
        GPRReg scratch1GPR = scratch1.gpr();
        GPRReg scratch2GPR = scratch2.gpr();
        GPRReg lengthGPR = length.gpr();
        FPRReg doubleFPR = doubleRegister.fpr();

        MacroAssembler::JumpList slowPath;
        MacroAssembler::JumpList done;

        // Copy-on-write contiguous arrays already share a JSImmutableButterfly:
        // rewind from the butterfly's data pointer back to the cell and return
        // it without copying anything.
        m_jit.load8(MacroAssembler::Address(argument, JSCell::indexingTypeAndMiscOffset()), scratch1GPR);
        m_jit.and32(TrustedImm32(IndexingModeMask), scratch1GPR);
        auto notShareCase = m_jit.branch32(CCallHelpers::NotEqual, scratch1GPR, TrustedImm32(CopyOnWriteArrayWithContiguous));
        m_jit.loadPtr(MacroAssembler::Address(argument, JSObject::butterflyOffset()), resultGPR);
        m_jit.addPtr(TrustedImm32(-static_cast<ptrdiff_t>(JSImmutableButterfly::offsetOfData())), resultGPR);
        done.append(m_jit.jump());

        // Only Int32/Double/Contiguous shapes are handled inline; anything else
        // (and over-long arrays) goes to the slow path.
        notShareCase.link(&m_jit);
        m_jit.and32(TrustedImm32(IndexingShapeMask), scratch1GPR);
        m_jit.sub32(TrustedImm32(Int32Shape), scratch1GPR);

        slowPath.append(m_jit.branch32(MacroAssembler::Above, scratch1GPR, TrustedImm32(ContiguousShape - Int32Shape)));

        m_jit.loadPtr(MacroAssembler::Address(argument, JSObject::butterflyOffset()), lengthGPR);
        m_jit.load32(MacroAssembler::Address(lengthGPR, Butterfly::offsetOfPublicLength()), lengthGPR);
        slowPath.append(m_jit.branch32(MacroAssembler::Above, lengthGPR, TrustedImm32(MAX_STORAGE_VECTOR_LENGTH)));
        static_assert(sizeof(JSValue) == 8 && 1 << 3 == 8, "This is strongly assumed in the code below.");
        // Allocation size = header + length * 8.
        m_jit.move(lengthGPR, scratch1GPR);
        m_jit.lshift32(TrustedImm32(3), scratch1GPR);
        m_jit.add32(TrustedImm32(JSImmutableButterfly::offsetOfData()), scratch1GPR);

        m_jit.emitAllocateVariableSizedCell<JSImmutableButterfly>(vm(), resultGPR, TrustedImmPtr(m_jit.graph().registerStructure(m_jit.graph().m_vm.immutableButterflyStructures[arrayIndexFromIndexingType(CopyOnWriteArrayWithContiguous) - NumberOfIndexingShapes].get())), scratch1GPR, scratch1GPR, scratch2GPR, slowPath);
        m_jit.store32(lengthGPR, MacroAssembler::Address(resultGPR, JSImmutableButterfly::offsetOfPublicLength()));
        m_jit.store32(lengthGPR, MacroAssembler::Address(resultGPR, JSImmutableButterfly::offsetOfVectorLength()));

        m_jit.loadPtr(MacroAssembler::Address(argument, JSObject::butterflyOffset()), scratch1GPR);

        m_jit.load8(MacroAssembler::Address(argument, JSCell::indexingTypeAndMiscOffset()), scratch2GPR);
        m_jit.and32(TrustedImm32(IndexingShapeMask), scratch2GPR);
        auto isDoubleArray = m_jit.branch32(MacroAssembler::Equal, scratch2GPR, TrustedImm32(DoubleShape));

        // Int32/Contiguous case: copy boxed values backwards; empty (hole)
        // slots are replaced by undefined.
        {
            done.append(m_jit.branchTest32(MacroAssembler::Zero, lengthGPR));
            auto loopStart = m_jit.label();
            m_jit.sub32(TrustedImm32(1), lengthGPR);
            m_jit.load64(MacroAssembler::BaseIndex(scratch1GPR, lengthGPR, MacroAssembler::TimesEight), scratch2GPR);
            auto notEmpty = m_jit.branchIfNotEmpty(scratch2GPR);
            m_jit.move(TrustedImm64(JSValue::encode(jsUndefined())), scratch2GPR);
            notEmpty.link(&m_jit);
            m_jit.store64(scratch2GPR, MacroAssembler::BaseIndex(resultGPR, lengthGPR, MacroAssembler::TimesEight, JSImmutableButterfly::offsetOfData()));
            m_jit.branchTest32(MacroAssembler::NonZero, lengthGPR).linkTo(loopStart, &m_jit);
            done.append(m_jit.jump());
        }

        // Double case: box each raw double; NaN slots (how holes are encoded in
        // double arrays) are replaced by undefined.
        isDoubleArray.link(&m_jit);
        {
            done.append(m_jit.branchTest32(MacroAssembler::Zero, lengthGPR));
            auto loopStart = m_jit.label();
            m_jit.sub32(TrustedImm32(1), lengthGPR);
            m_jit.loadDouble(MacroAssembler::BaseIndex(scratch1GPR, lengthGPR, MacroAssembler::TimesEight), doubleFPR);
            auto notEmpty = m_jit.branchIfNotNaN(doubleFPR);
            m_jit.move(TrustedImm64(JSValue::encode(jsUndefined())), scratch2GPR);
            auto doStore = m_jit.jump();
            notEmpty.link(&m_jit);
            m_jit.boxDouble(doubleFPR, scratch2GPR);
            doStore.link(&m_jit);
            m_jit.store64(scratch2GPR, MacroAssembler::BaseIndex(resultGPR, lengthGPR, MacroAssembler::TimesEight, JSImmutableButterfly::offsetOfData()));
            m_jit.branchTest32(MacroAssembler::NonZero, lengthGPR).linkTo(loopStart, &m_jit);
            done.append(m_jit.jump());
        }

        addSlowPathGenerator(slowPathCall(slowPath, this, operationSpreadFastArray, resultGPR, TrustedImmPtr::weakPointer(m_graph, m_graph.globalObjectFor(node->origin.semantic)), argument));

        done.link(&m_jit);
        m_jit.mutatorFence(vm());
        cellResult(resultGPR, node);
#else
        flushRegisters();

        GPRFlushedCallResult result(this);
        GPRReg resultGPR = result.gpr();
        callOperation(operationSpreadFastArray, resultGPR, TrustedImmPtr::weakPointer(m_graph, m_graph.globalObjectFor(node->origin.semantic)), argument);
        m_jit.exceptionCheck();
        cellResult(resultGPR, node);
#endif // USE(JSVALUE64)
    } else {
        flushRegisters();

        GPRFlushedCallResult result(this);
        GPRReg resultGPR = result.gpr();
        callOperation(operationSpreadGeneric, resultGPR, TrustedImmPtr::weakPointer(m_graph, m_graph.globalObjectFor(node->origin.semantic)), argument);
        m_jit.exceptionCheck();
        cellResult(resultGPR, node);
    }
}
9112
void SpeculativeJIT::compileNewArray(Node* node)
{
    // Compiles NewArray. Fast path: when the global object is not "having a bad
    // time" and no ArrayStorage shape is needed, inline-allocate the array and
    // store the children directly into its butterfly. Otherwise spill the
    // children into a scratch buffer and call into the runtime.
    JSGlobalObject* globalObject = m_jit.graph().globalObjectFor(node->origin.semantic);
    RegisteredStructure structure = m_jit.graph().registerStructure(globalObject->arrayStructureForIndexingTypeDuringAllocation(node->indexingType()));
    if (!globalObject->isHavingABadTime() && !hasAnyArrayStorage(node->indexingType())) {
        unsigned numElements = node->numChildren();
        unsigned vectorLengthHint = node->vectorLengthHint();
        ASSERT(vectorLengthHint >= numElements);

        // Because we first speculate on all of the children here, we can never exit after creating
        // uninitialized contiguous JSArray, which ensures that we will never produce a half-baked JSArray.
        for (unsigned operandIndex = 0; operandIndex < node->numChildren(); ++operandIndex)
            speculate(node, m_jit.graph().varArgChild(node, operandIndex));

        GPRTemporary result(this);
        GPRTemporary storage(this);

        GPRReg resultGPR = result.gpr();
        GPRReg storageGPR = storage.gpr();

        emitAllocateRawObject(resultGPR, structure, storageGPR, numElements, vectorLengthHint);

        // At this point, one way or another, resultGPR and storageGPR have pointers to
        // the JSArray and the Butterfly, respectively.

        ASSERT(!hasUndecided(structure->indexingType()) || !node->numChildren());

        for (unsigned operandIndex = 0; operandIndex < node->numChildren(); ++operandIndex) {
            Edge use = m_jit.graph().varArgChild(node, operandIndex);
            switch (node->indexingType()) {
            case ALL_BLANK_INDEXING_TYPES:
            case ALL_UNDECIDED_INDEXING_TYPES:
                CRASH();
                break;
            case ALL_DOUBLE_INDEXING_TYPES: {
                // Double arrays store raw doubles rather than boxed JSValues.
                SpeculateDoubleOperand operand(this, use);
                FPRReg opFPR = operand.fpr();
                m_jit.storeDouble(opFPR, MacroAssembler::Address(storageGPR, sizeof(double) * operandIndex));
                break;
            }
            case ALL_INT32_INDEXING_TYPES:
            case ALL_CONTIGUOUS_INDEXING_TYPES: {
                // Type checks already happened in the speculation loop above,
                // hence ManualOperandSpeculation.
                JSValueOperand operand(this, use, ManualOperandSpeculation);
                JSValueRegs operandRegs = operand.jsValueRegs();
                m_jit.storeValue(operandRegs, MacroAssembler::Address(storageGPR, sizeof(JSValue) * operandIndex));
                break;
            }
            default:
                CRASH();
                break;
            }
        }

        // Yuck, we should *really* have a way of also returning the storageGPR. But
        // that's the least of what's wrong with this code. We really shouldn't be
        // allocating the array after having computed - and probably spilled to the
        // stack - all of the things that will go into the array. The solution to that
        // bigger problem will also likely fix the redundancy in reloading the storage
        // pointer that we currently have.

        cellResult(resultGPR, node);
        return;
    }

    if (!node->numChildren()) {
        // No elements: just ask the runtime for an empty array.
        flushRegisters();
        GPRFlushedCallResult result(this);
        callOperation(operationNewEmptyArray, result.gpr(), &vm(), structure);
        m_jit.exceptionCheck();
        cellResult(result.gpr(), node);
        return;
    }

    // Slow path: spill every child into a scratch buffer, then hand the buffer
    // and count to operationNewArray.
    size_t scratchSize = sizeof(EncodedJSValue) * node->numChildren();
    ScratchBuffer* scratchBuffer = vm().scratchBufferForSize(scratchSize);
    EncodedJSValue* buffer = scratchBuffer ? static_cast<EncodedJSValue*>(scratchBuffer->dataBuffer()) : nullptr;

    for (unsigned operandIdx = 0; operandIdx < node->numChildren(); ++operandIdx) {
        // Need to perform the speculations that this node promises to perform. If we're
        // emitting code here and the indexing type is not array storage then there is
        // probably something hilarious going on and we're already failing at all the
        // things, but at least we're going to be sound.
        Edge use = m_jit.graph().m_varArgChildren[node->firstChild() + operandIdx];
        switch (node->indexingType()) {
        case ALL_BLANK_INDEXING_TYPES:
        case ALL_UNDECIDED_INDEXING_TYPES:
            CRASH();
            break;
        case ALL_DOUBLE_INDEXING_TYPES: {
            SpeculateDoubleOperand operand(this, use);
            FPRReg opFPR = operand.fpr();
            DFG_TYPE_CHECK(
                JSValueRegs(), use, SpecDoubleReal,
                m_jit.branchIfNaN(opFPR));
#if USE(JSVALUE64)
            JSValueRegsTemporary scratch(this);
            JSValueRegs scratchRegs = scratch.regs();
            m_jit.boxDouble(opFPR, scratchRegs);
            m_jit.storeValue(scratchRegs, buffer + operandIdx);
#else
            m_jit.storeDouble(opFPR, TrustedImmPtr(buffer + operandIdx));
#endif
            operand.use();
            break;
        }
        case ALL_INT32_INDEXING_TYPES:
        case ALL_CONTIGUOUS_INDEXING_TYPES:
        case ALL_ARRAY_STORAGE_INDEXING_TYPES: {
            JSValueOperand operand(this, use, ManualOperandSpeculation);
            JSValueRegs operandRegs = operand.jsValueRegs();
            if (hasInt32(node->indexingType())) {
                DFG_TYPE_CHECK(
                    operandRegs, use, SpecInt32Only,
                    m_jit.branchIfNotInt32(operandRegs));
            }
            m_jit.storeValue(operandRegs, buffer + operandIdx);
            operand.use();
            break;
        }
        default:
            CRASH();
            break;
        }
    }

    flushRegisters();

    GPRFlushedCallResult result(this);
    GPRReg resultGPR = result.gpr();

    callOperation(
        operationNewArray, resultGPR, TrustedImmPtr::weakPointer(m_graph, globalObject), m_jit.graph().registerStructure(globalObject->arrayStructureForIndexingTypeDuringAllocation(node->indexingType())),
        static_cast<void*>(buffer), size_t(node->numChildren()));
    m_jit.exceptionCheck();

    cellResult(resultGPR, node, UseChildrenCalledExplicitly);
}
9250
9251void SpeculativeJIT::compileNewArrayWithSpread(Node* node)
9252{
9253 ASSERT(node->op() == NewArrayWithSpread);
9254 JSGlobalObject* globalObject = m_jit.graph().globalObjectFor(node->origin.semantic);
9255
9256#if USE(JSVALUE64)
9257 if (m_jit.graph().isWatchingHavingABadTimeWatchpoint(node)) {
9258 GPRTemporary result(this);
9259 GPRReg resultGPR = result.gpr();
9260
9261 BitVector* bitVector = node->bitVector();
9262
9263 if (node->numChildren() == 1 && bitVector->get(0)) {
9264 Edge use = m_jit.graph().varArgChild(node, 0);
9265 SpeculateCellOperand immutableButterfly(this, use);
9266 GPRTemporary result(this);
9267 GPRTemporary butterfly(this);
9268 GPRTemporary scratch1(this);
9269 GPRTemporary scratch2(this);
9270
9271 GPRReg immutableButterflyGPR = immutableButterfly.gpr();
9272 GPRReg resultGPR = result.gpr();
9273 GPRReg butterflyGPR = butterfly.gpr();
9274 GPRReg scratch1GPR = scratch1.gpr();
9275 GPRReg scratch2GPR = scratch2.gpr();
9276
9277 RegisteredStructure structure = m_jit.graph().registerStructure(globalObject->originalArrayStructureForIndexingType(CopyOnWriteArrayWithContiguous));
9278
9279 MacroAssembler::JumpList slowCases;
9280
9281 m_jit.move(immutableButterflyGPR, butterflyGPR);
9282 m_jit.addPtr(TrustedImm32(JSImmutableButterfly::offsetOfData()), butterflyGPR);
9283
9284 emitAllocateJSObject<JSArray>(resultGPR, TrustedImmPtr(structure), butterflyGPR, scratch1GPR, scratch2GPR, slowCases);
9285
9286 addSlowPathGenerator(slowPathCall(slowCases, this, operationNewArrayBuffer, resultGPR, &vm(), structure, immutableButterflyGPR));
9287
9288 cellResult(resultGPR, node);
9289 return;
9290 }
9291
9292 {
9293 unsigned startLength = 0;
9294 for (unsigned i = 0; i < node->numChildren(); ++i) {
9295 if (!bitVector->get(i))
9296 ++startLength;
9297 }
9298
9299 GPRTemporary length(this);
9300 GPRReg lengthGPR = length.gpr();
9301 m_jit.move(TrustedImm32(startLength), lengthGPR);
9302
9303 for (unsigned i = 0; i < node->numChildren(); ++i) {
9304 if (bitVector->get(i)) {
9305 Edge use = m_jit.graph().varArgChild(node, i);
9306 SpeculateCellOperand immutableButterfly(this, use);
9307 GPRReg immutableButterflyGPR = immutableButterfly.gpr();
9308 speculationCheck(Overflow, JSValueRegs(), nullptr, m_jit.branchAdd32(MacroAssembler::Overflow, MacroAssembler::Address(immutableButterflyGPR, JSImmutableButterfly::offsetOfPublicLength()), lengthGPR));
9309 }
9310 }
9311
9312 speculationCheck(Overflow, JSValueRegs(), nullptr, m_jit.branch32(MacroAssembler::AboveOrEqual, lengthGPR, TrustedImm32(MIN_ARRAY_STORAGE_CONSTRUCTION_LENGTH)));
9313
9314 // We can tell compileAllocateNewArrayWithSize() that it does not need to
9315 // check for large arrays and use ArrayStorage structure because we already
9316 // ensured above that the spread array length will definitely fit in a
9317 // non-ArrayStorage shaped array.
9318 bool shouldAllowForArrayStorageStructureForLargeArrays = false;
9319 compileAllocateNewArrayWithSize(globalObject, resultGPR, lengthGPR, ArrayWithContiguous, shouldAllowForArrayStorageStructureForLargeArrays);
9320 }
9321
9322 GPRTemporary index(this);
9323 GPRReg indexGPR = index.gpr();
9324
9325 GPRTemporary storage(this);
9326 GPRReg storageGPR = storage.gpr();
9327
9328 m_jit.move(TrustedImm32(0), indexGPR);
9329 m_jit.loadPtr(MacroAssembler::Address(resultGPR, JSObject::butterflyOffset()), storageGPR);
9330
9331 for (unsigned i = 0; i < node->numChildren(); ++i) {
9332 Edge use = m_jit.graph().varArgChild(node, i);
9333 if (bitVector->get(i)) {
9334 SpeculateCellOperand immutableButterfly(this, use);
9335 GPRReg immutableButterflyGPR = immutableButterfly.gpr();
9336
9337 GPRTemporary immutableButterflyIndex(this);
9338 GPRReg immutableButterflyIndexGPR = immutableButterflyIndex.gpr();
9339
9340 GPRTemporary item(this);
9341 GPRReg itemGPR = item.gpr();
9342
9343 GPRTemporary immutableButterflyLength(this);
9344 GPRReg immutableButterflyLengthGPR = immutableButterflyLength.gpr();
9345
9346 m_jit.load32(MacroAssembler::Address(immutableButterflyGPR, JSImmutableButterfly::offsetOfPublicLength()), immutableButterflyLengthGPR);
9347 m_jit.move(TrustedImm32(0), immutableButterflyIndexGPR);
9348 auto done = m_jit.branchPtr(MacroAssembler::AboveOrEqual, immutableButterflyIndexGPR, immutableButterflyLengthGPR);
9349 auto loopStart = m_jit.label();
9350 m_jit.load64(
9351 MacroAssembler::BaseIndex(immutableButterflyGPR, immutableButterflyIndexGPR, MacroAssembler::TimesEight, JSImmutableButterfly::offsetOfData()),
9352 itemGPR);
9353
9354 m_jit.store64(itemGPR, MacroAssembler::BaseIndex(storageGPR, indexGPR, MacroAssembler::TimesEight));
9355 m_jit.addPtr(TrustedImm32(1), immutableButterflyIndexGPR);
9356 m_jit.addPtr(TrustedImm32(1), indexGPR);
9357 m_jit.branchPtr(MacroAssembler::Below, immutableButterflyIndexGPR, immutableButterflyLengthGPR).linkTo(loopStart, &m_jit);
9358
9359 done.link(&m_jit);
9360 } else {
9361 JSValueOperand item(this, use);
9362 GPRReg itemGPR = item.gpr();
9363 m_jit.store64(itemGPR, MacroAssembler::BaseIndex(storageGPR, indexGPR, MacroAssembler::TimesEight));
9364 m_jit.addPtr(TrustedImm32(1), indexGPR);
9365 }
9366 }
9367
9368 cellResult(resultGPR, node);
9369 return;
9370 }
9371#endif // USE(JSVALUE64)
9372
9373 ASSERT(node->numChildren());
9374 size_t scratchSize = sizeof(EncodedJSValue) * node->numChildren();
9375 ScratchBuffer* scratchBuffer = vm().scratchBufferForSize(scratchSize);
9376 EncodedJSValue* buffer = static_cast<EncodedJSValue*>(scratchBuffer->dataBuffer());
9377
9378 BitVector* bitVector = node->bitVector();
9379 for (unsigned i = 0; i < node->numChildren(); ++i) {
9380 Edge use = m_jit.graph().m_varArgChildren[node->firstChild() + i];
9381 if (bitVector->get(i)) {
9382 SpeculateCellOperand immutableButterfly(this, use);
9383 GPRReg immutableButterflyGPR = immutableButterfly.gpr();
9384#if USE(JSVALUE64)
9385 m_jit.store64(immutableButterflyGPR, &buffer[i]);
9386#else
9387 char* pointer = static_cast<char*>(static_cast<void*>(&buffer[i]));
9388 m_jit.store32(immutableButterflyGPR, pointer + PayloadOffset);
9389 m_jit.store32(TrustedImm32(JSValue::CellTag), pointer + TagOffset);
9390#endif
9391 } else {
9392 JSValueOperand input(this, use);
9393 JSValueRegs inputRegs = input.jsValueRegs();
9394 m_jit.storeValue(inputRegs, &buffer[i]);
9395 }
9396 }
9397
9398 flushRegisters();
9399
9400 GPRFlushedCallResult result(this);
9401 GPRReg resultGPR = result.gpr();
9402
9403 callOperation(operationNewArrayWithSpreadSlow, resultGPR, TrustedImmPtr::weakPointer(m_graph, globalObject), buffer, node->numChildren());
9404 m_jit.exceptionCheck();
9405
9406 cellResult(resultGPR, node);
9407}
9408
9409void SpeculativeJIT::compileGetRestLength(Node* node)
9410{
9411 ASSERT(node->op() == GetRestLength);
9412
9413 GPRTemporary result(this);
9414 GPRReg resultGPR = result.gpr();
9415
9416 emitGetLength(node->origin.semantic, resultGPR);
9417 CCallHelpers::Jump hasNonZeroLength = m_jit.branch32(MacroAssembler::Above, resultGPR, Imm32(node->numberOfArgumentsToSkip()));
9418 m_jit.move(TrustedImm32(0), resultGPR);
9419 CCallHelpers::Jump done = m_jit.jump();
9420 hasNonZeroLength.link(&m_jit);
9421 if (node->numberOfArgumentsToSkip())
9422 m_jit.sub32(TrustedImm32(node->numberOfArgumentsToSkip()), resultGPR);
9423 done.link(&m_jit);
9424 strictInt32Result(resultGPR, node);
9425}
9426
// Computes a clamped slice index into resultGPR, following the semantics used
// by Array.prototype.slice-style arguments: a negative index is taken relative
// to the end (length + index, clamped below at 0), and a positive index is
// clamped above at length. The result is always in [0, length].
//
// If |target| is an Int32 constant, the clamp is partially folded at compile
// time; otherwise the index is taken from |indexGPR| when provided, or from a
// freshly speculated Int32 operand for |target|.
void SpeculativeJIT::emitPopulateSliceIndex(Edge& target, std::optional<GPRReg> indexGPR, GPRReg lengthGPR, GPRReg resultGPR)
{
    if (target->isInt32Constant()) {
        int32_t value = target->asInt32();
        if (value == 0) {
            // slice index 0 clamps to 0 regardless of length.
            m_jit.move(TrustedImm32(0), resultGPR);
            return;
        }

        MacroAssembler::JumpList done;
        if (value > 0) {
            // Positive constant: result = min(value, length).
            m_jit.move(TrustedImm32(value), resultGPR);
            done.append(m_jit.branch32(MacroAssembler::BelowOrEqual, resultGPR, lengthGPR));
            m_jit.move(lengthGPR, resultGPR);
        } else {
            ASSERT(value != 0);
            // Negative constant: result = max(length + value, 0). branchAdd32
            // takes the done path when the sum is non-negative.
            m_jit.move(lengthGPR, resultGPR);
            done.append(m_jit.branchAdd32(MacroAssembler::PositiveOrZero, TrustedImm32(value), resultGPR));
            m_jit.move(TrustedImm32(0), resultGPR);
        }
        done.link(&m_jit);
        return;
    }

    // Non-constant index: speculate it as Int32 unless the caller already has
    // it in a register.
    std::optional<SpeculateInt32Operand> index;
    if (!indexGPR) {
        index.emplace(this, target);
        indexGPR = index->gpr();
    }
    MacroAssembler::JumpList done;

    // Negative index: result = max(length + index, 0).
    auto isPositive = m_jit.branch32(MacroAssembler::GreaterThanOrEqual, indexGPR.value(), TrustedImm32(0));
    m_jit.move(lengthGPR, resultGPR);
    done.append(m_jit.branchAdd32(MacroAssembler::PositiveOrZero, indexGPR.value(), resultGPR));
    m_jit.move(TrustedImm32(0), resultGPR);
    done.append(m_jit.jump());

    // Non-negative index: result = min(index, length).
    isPositive.link(&m_jit);
    m_jit.move(indexGPR.value(), resultGPR);
    done.append(m_jit.branch32(MacroAssembler::BelowOrEqual, resultGPR, lengthGPR));
    m_jit.move(lengthGPR, resultGPR);

    done.link(&m_jit);
}
9471
// ArraySlice: allocate a new array and copy the [start, end) range of the
// source array's butterfly into it. The node's var-arg children are:
//   child 0: the source array (cell)
//   child 1: the start index (when numChildren >= 3)
//   child 2: the end index (when numChildren == 4)
//   last child: the source array's storage (butterfly)
// With numChildren == 2 the whole array is copied.
void SpeculativeJIT::compileArraySlice(Node* node)
{
    ASSERT(node->op() == ArraySlice);

    JSGlobalObject* globalObject = m_jit.graph().globalObjectFor(node->origin.semantic);

    GPRTemporary temp(this);
    StorageOperand storage(this, m_jit.graph().varArgChild(node, node->numChildren() - 1));
    GPRTemporary result(this);

    GPRReg storageGPR = storage.gpr();
    GPRReg resultGPR = result.gpr();
    GPRReg tempGPR = temp.gpr();

    // Phase 1: compute the length of the result array into tempGPR.
    if (node->numChildren() == 2)
        m_jit.load32(MacroAssembler::Address(storageGPR, Butterfly::offsetOfPublicLength()), tempGPR);
    else {
        ASSERT(node->numChildren() == 3 || node->numChildren() == 4);
        GPRTemporary tempLength(this);
        GPRReg lengthGPR = tempLength.gpr();
        m_jit.load32(MacroAssembler::Address(storageGPR, Butterfly::offsetOfPublicLength()), lengthGPR);

        // tempGPR = clamped end index (or length when no end argument).
        if (node->numChildren() == 4)
            emitPopulateSliceIndex(m_jit.graph().varArgChild(node, 2), std::nullopt, lengthGPR, tempGPR);
        else
            m_jit.move(lengthGPR, tempGPR);

        if (m_jit.graph().varArgChild(node, 1)->isInt32Constant() && m_jit.graph().varArgChild(node, 1)->asInt32() == 0) {
            // Do nothing for array.slice(0, end) or array.slice(0) cases.
            // `tempGPR` already points to the size of a newly created array.
        } else {
            // tempGPR = max(end - start, 0), i.e. the new array's length.
            GPRTemporary tempStartIndex(this);
            GPRReg startGPR = tempStartIndex.gpr();
            emitPopulateSliceIndex(m_jit.graph().varArgChild(node, 1), std::nullopt, lengthGPR, startGPR);

            auto tooBig = m_jit.branch32(MacroAssembler::Above, startGPR, tempGPR);
            m_jit.sub32(startGPR, tempGPR); // the size of the array we'll make.
            auto done = m_jit.jump();

            tooBig.link(&m_jit);
            m_jit.move(TrustedImm32(0), tempGPR);
            done.link(&m_jit);
        }
    }

    GPRTemporary temp3(this);
    GPRReg tempValue = temp3.gpr();

    // Phase 2: allocate the result array with the structure/empty-value
    // matching the source's indexing shape.
    {
        // We need to keep the source array alive at least until after we're done
        // with anything that can GC (e.g. allocating the result array below).
        SpeculateCellOperand cell(this, m_jit.graph().varArgChild(node, 0));

        m_jit.load8(MacroAssembler::Address(cell.gpr(), JSCell::indexingTypeAndMiscOffset()), tempValue);
        // We can ignore the writability of the cell since we won't write to the source.
        m_jit.and32(TrustedImm32(AllWritableArrayTypesAndHistory), tempValue);

        JSValueRegsTemporary emptyValue(this);
        JSValueRegs emptyValueRegs = emptyValue.regs();

        GPRTemporary storage(this);
        GPRReg storageResultGPR = storage.gpr();

        GPRReg sizeGPR = tempGPR;

        CCallHelpers::JumpList done;

        auto emitMoveEmptyValue = [&] (JSValue v) {
            m_jit.moveValue(v, emptyValueRegs);
        };

        // Select the result structure (into tempValue) and the value used to
        // pre-fill the butterfly (into emptyValueRegs) based on indexing shape.
        auto isContiguous = m_jit.branch32(MacroAssembler::Equal, tempValue, TrustedImm32(ArrayWithContiguous));
        auto isInt32 = m_jit.branch32(MacroAssembler::Equal, tempValue, TrustedImm32(ArrayWithInt32));
        // When we emit an ArraySlice, we dominate the use of the array by a CheckStructure
        // to ensure the incoming array is one to be one of the original array structures
        // with one of the following indexing shapes: Int32, Contiguous, Double. Therefore,
        // we're a double array here.
        m_jit.move(TrustedImmPtr(m_jit.graph().registerStructure(globalObject->arrayStructureForIndexingTypeDuringAllocation(ArrayWithDouble))), tempValue);
        emitMoveEmptyValue(jsNaN());
        done.append(m_jit.jump());

        isContiguous.link(&m_jit);
        m_jit.move(TrustedImmPtr(m_jit.graph().registerStructure(globalObject->arrayStructureForIndexingTypeDuringAllocation(ArrayWithContiguous))), tempValue);
        emitMoveEmptyValue(JSValue());
        done.append(m_jit.jump());

        isInt32.link(&m_jit);
        m_jit.move(TrustedImmPtr(m_jit.graph().registerStructure(globalObject->arrayStructureForIndexingTypeDuringAllocation(ArrayWithInt32))), tempValue);
        emitMoveEmptyValue(JSValue());

        done.link(&m_jit);

        MacroAssembler::JumpList slowCases;
        // Null storage tells the slow path generator it must allocate the butterfly too.
        m_jit.move(TrustedImmPtr(nullptr), storageResultGPR);
        // Enable the fast case on 64-bit platforms, where a sufficient amount of GP registers should be available.
        // Other platforms could support the same approach with custom code, but that is not currently worth the extra code maintenance.
        if (is64Bit()) {
            GPRTemporary scratch(this);
            GPRTemporary scratch2(this);
            GPRReg scratchGPR = scratch.gpr();
            GPRReg scratch2GPR = scratch2.gpr();

            emitAllocateButterfly(storageResultGPR, sizeGPR, scratchGPR, scratch2GPR, resultGPR, slowCases);
            emitInitializeButterfly(storageResultGPR, sizeGPR, emptyValueRegs, scratchGPR);
            emitAllocateJSObject<JSArray>(resultGPR, tempValue, storageResultGPR, scratchGPR, scratch2GPR, slowCases);
            m_jit.mutatorFence(vm());
        } else {
            slowCases.append(m_jit.jump());
        }

        addSlowPathGenerator(makeUnique<CallArrayAllocatorWithVariableStructureVariableSizeSlowPathGenerator>(
            slowCases, this, operationNewArrayWithSize, resultGPR, TrustedImmPtr::weakPointer(m_graph, globalObject), tempValue, sizeGPR, storageResultGPR));
    }

    // Phase 3: recompute the clamped [loadIndex, end) range (tempGPR was
    // consumed as the allocation size above) and copy the elements.
    GPRTemporary temp4(this);
    GPRReg loadIndex = temp4.gpr();

    if (node->numChildren() == 2) {
        m_jit.load32(MacroAssembler::Address(storageGPR, Butterfly::offsetOfPublicLength()), tempGPR);
        m_jit.move(TrustedImm32(0), loadIndex);
    } else {
        m_jit.load32(MacroAssembler::Address(storageGPR, Butterfly::offsetOfPublicLength()), tempValue);
        if (node->numChildren() == 4)
            emitPopulateSliceIndex(m_jit.graph().varArgChild(node, 2), std::nullopt, tempValue, tempGPR);
        else
            m_jit.move(tempValue, tempGPR);
        emitPopulateSliceIndex(m_jit.graph().varArgChild(node, 1), std::nullopt, tempValue, loadIndex);
    }

    GPRTemporary temp5(this);
    GPRReg storeIndex = temp5.gpr();
    m_jit.move(TrustedImmPtr(nullptr), storeIndex);

    GPRTemporary temp2(this);
    GPRReg resultButterfly = temp2.gpr();

    m_jit.loadPtr(MacroAssembler::Address(resultGPR, JSObject::butterflyOffset()), resultButterfly);
    // Zero-extend so the pointer-width loop comparisons below are correct.
    m_jit.zeroExtend32ToWord(tempGPR, tempGPR);
    m_jit.zeroExtend32ToWord(loadIndex, loadIndex);
    auto done = m_jit.branchPtr(MacroAssembler::AboveOrEqual, loadIndex, tempGPR);

    // Copy loop: result[storeIndex++] = source[loadIndex++] while loadIndex < end.
    auto loop = m_jit.label();
#if USE(JSVALUE64)
    m_jit.load64(
        MacroAssembler::BaseIndex(storageGPR, loadIndex, MacroAssembler::TimesEight), tempValue);
    m_jit.store64(
        tempValue, MacroAssembler::BaseIndex(resultButterfly, storeIndex, MacroAssembler::TimesEight));
#else
    m_jit.load32(
        MacroAssembler::BaseIndex(storageGPR, loadIndex, MacroAssembler::TimesEight, PayloadOffset), tempValue);
    m_jit.store32(
        tempValue, MacroAssembler::BaseIndex(resultButterfly, storeIndex, MacroAssembler::TimesEight, PayloadOffset));
    m_jit.load32(
        MacroAssembler::BaseIndex(storageGPR, loadIndex, MacroAssembler::TimesEight, TagOffset), tempValue);
    m_jit.store32(
        tempValue, MacroAssembler::BaseIndex(resultButterfly, storeIndex, MacroAssembler::TimesEight, TagOffset));
#endif // USE(JSVALUE64)
    m_jit.addPtr(TrustedImm32(1), loadIndex);
    m_jit.addPtr(TrustedImm32(1), storeIndex);
    m_jit.branchPtr(MacroAssembler::Below, loadIndex, tempGPR).linkTo(loop, &m_jit);

    done.link(&m_jit);
    cellResult(resultGPR, node);
}
9636
// ArrayIndexOf: find the first index >= the (optional) start index whose
// element equals the search value, or -1. Var-arg children:
//   child 0: the array; child 1: the search element;
//   child 2: the start index (only when numChildren == 4);
//   last child: the array's storage (butterfly).
// The comparison strategy is chosen by the search element's use kind; strings
// and fully untyped values fall back to C++ operations.
void SpeculativeJIT::compileArrayIndexOf(Node* node)
{
    ASSERT(node->op() == ArrayIndexOf);

    StorageOperand storage(this, m_jit.graph().varArgChild(node, node->numChildren() == 3 ? 2 : 3));
    GPRTemporary index(this);
    GPRTemporary tempLength(this);

    GPRReg storageGPR = storage.gpr();
    GPRReg indexGPR = index.gpr();
    GPRReg lengthGPR = tempLength.gpr();

    m_jit.load32(MacroAssembler::Address(storageGPR, Butterfly::offsetOfPublicLength()), lengthGPR);

    // Start the scan at the clamped fromIndex argument, or at 0.
    if (node->numChildren() == 4)
        emitPopulateSliceIndex(m_jit.graph().varArgChild(node, 2), std::nullopt, lengthGPR, indexGPR);
    else
        m_jit.move(TrustedImm32(0), indexGPR);

    Edge& searchElementEdge = m_jit.graph().varArgChild(node, 1);
    switch (searchElementEdge.useKind()) {
    case Int32Use:
    case ObjectUse:
    case SymbolUse:
    case OtherUse: {
        // Shared scan loop; emitCompare() emits the per-kind equality test and
        // returns the jump taken on a match (with indexGPR holding the result).
        auto emitLoop = [&] (auto emitCompare) {
#if ENABLE(DFG_REGISTER_ALLOCATION_VALIDATION)
            m_jit.clearRegisterAllocationOffsets();
#endif

            m_jit.zeroExtend32ToWord(lengthGPR, lengthGPR);
            m_jit.zeroExtend32ToWord(indexGPR, indexGPR);

            auto loop = m_jit.label();
            auto notFound = m_jit.branch32(CCallHelpers::Equal, indexGPR, lengthGPR);

            auto found = emitCompare();

            m_jit.add32(TrustedImm32(1), indexGPR);
            m_jit.jump().linkTo(loop, &m_jit);

            notFound.link(&m_jit);
            m_jit.move(TrustedImm32(-1), indexGPR);
            found.link(&m_jit);
            strictInt32Result(indexGPR, node);
        };

        if (searchElementEdge.useKind() == Int32Use) {
            ASSERT(node->arrayMode().type() == Array::Int32);
#if USE(JSVALUE64)
            // On 64-bit we compare the full boxed value, so an Int32 search
            // element can be compared in one branch64.
            JSValueOperand searchElement(this, searchElementEdge, ManualOperandSpeculation);
            JSValueRegs searchElementRegs = searchElement.jsValueRegs();
            speculateInt32(searchElementEdge, searchElementRegs);
            GPRReg searchElementGPR = searchElementRegs.payloadGPR();
#else
            SpeculateInt32Operand searchElement(this, searchElementEdge);
            GPRReg searchElementGPR = searchElement.gpr();

            GPRTemporary temp(this);
            GPRReg tempGPR = temp.gpr();
#endif
            emitLoop([&] () {
#if USE(JSVALUE64)
                auto found = m_jit.branch64(CCallHelpers::Equal, MacroAssembler::BaseIndex(storageGPR, indexGPR, MacroAssembler::TimesEight), searchElementGPR);
#else
                // 32-bit: only slots tagged Int32 can match; compare payloads.
                auto skip = m_jit.branch32(CCallHelpers::NotEqual, MacroAssembler::BaseIndex(storageGPR, indexGPR, MacroAssembler::TimesEight, TagOffset), TrustedImm32(JSValue::Int32Tag));
                m_jit.load32(MacroAssembler::BaseIndex(storageGPR, indexGPR, MacroAssembler::TimesEight, PayloadOffset), tempGPR);
                auto found = m_jit.branch32(CCallHelpers::Equal, tempGPR, searchElementGPR);
                skip.link(&m_jit);
#endif
                return found;
            });
            return;
        }

        if (searchElementEdge.useKind() == OtherUse) {
            ASSERT(node->arrayMode().type() == Array::Contiguous);
            JSValueOperand searchElement(this, searchElementEdge, ManualOperandSpeculation);
            GPRTemporary temp(this);

            JSValueRegs searchElementRegs = searchElement.jsValueRegs();
            GPRReg tempGPR = temp.gpr();
            speculateOther(searchElementEdge, searchElementRegs, tempGPR);

            emitLoop([&] () {
#if USE(JSVALUE64)
                auto found = m_jit.branch64(CCallHelpers::Equal, MacroAssembler::BaseIndex(storageGPR, indexGPR, MacroAssembler::TimesEight), searchElementRegs.payloadGPR());
#else
                // 32-bit: "other" values (null/undefined) are distinguished by
                // tag alone.
                m_jit.load32(MacroAssembler::BaseIndex(storageGPR, indexGPR, MacroAssembler::TimesEight, TagOffset), tempGPR);
                auto found = m_jit.branch32(CCallHelpers::Equal, tempGPR, searchElementRegs.tagGPR());
#endif
                return found;
            });
            return;
        }

        // Remaining kinds here: ObjectUse / SymbolUse — cell identity compare.
        ASSERT(node->arrayMode().type() == Array::Contiguous);
        SpeculateCellOperand searchElement(this, searchElementEdge);
        GPRReg searchElementGPR = searchElement.gpr();

        if (searchElementEdge.useKind() == ObjectUse)
            speculateObject(searchElementEdge, searchElementGPR);
        else {
            ASSERT(searchElementEdge.useKind() == SymbolUse);
            speculateSymbol(searchElementEdge, searchElementGPR);
        }

#if USE(JSVALUE32_64)
        GPRTemporary temp(this);
        GPRReg tempGPR = temp.gpr();
#endif

        emitLoop([&] () {
#if USE(JSVALUE64)
            auto found = m_jit.branch64(CCallHelpers::Equal, MacroAssembler::BaseIndex(storageGPR, indexGPR, MacroAssembler::TimesEight), searchElementGPR);
#else
            auto skip = m_jit.branch32(CCallHelpers::NotEqual, MacroAssembler::BaseIndex(storageGPR, indexGPR, MacroAssembler::TimesEight, TagOffset), TrustedImm32(JSValue::CellTag));
            m_jit.load32(MacroAssembler::BaseIndex(storageGPR, indexGPR, MacroAssembler::TimesEight, PayloadOffset), tempGPR);
            auto found = m_jit.branch32(CCallHelpers::Equal, tempGPR, searchElementGPR);
            skip.link(&m_jit);
#endif
            return found;
        });
        return;
    }

    case DoubleRepUse: {
        // Double arrays: inline loop with an ordered-equal double compare
        // (NaN never matches, holes are NaN, so holes are skipped naturally).
        ASSERT(node->arrayMode().type() == Array::Double);
        SpeculateDoubleOperand searchElement(this, searchElementEdge);
        FPRTemporary tempDouble(this);

        FPRReg searchElementFPR = searchElement.fpr();
        FPRReg tempFPR = tempDouble.fpr();

#if ENABLE(DFG_REGISTER_ALLOCATION_VALIDATION)
        m_jit.clearRegisterAllocationOffsets();
#endif

        m_jit.zeroExtend32ToWord(lengthGPR, lengthGPR);
        m_jit.zeroExtend32ToWord(indexGPR, indexGPR);

        auto loop = m_jit.label();
        auto notFound = m_jit.branch32(CCallHelpers::Equal, indexGPR, lengthGPR);
        m_jit.loadDouble(MacroAssembler::BaseIndex(storageGPR, indexGPR, MacroAssembler::TimesEight), tempFPR);
        auto found = m_jit.branchDouble(CCallHelpers::DoubleEqualAndOrdered, tempFPR, searchElementFPR);
        m_jit.add32(TrustedImm32(1), indexGPR);
        m_jit.jump().linkTo(loop, &m_jit);

        notFound.link(&m_jit);
        m_jit.move(TrustedImm32(-1), indexGPR);
        found.link(&m_jit);
        strictInt32Result(indexGPR, node);
        return;
    }

    case StringUse: {
        // String equality requires content comparison; call out to C++.
        ASSERT(node->arrayMode().type() == Array::Contiguous);
        SpeculateCellOperand searchElement(this, searchElementEdge);

        GPRReg searchElementGPR = searchElement.gpr();

        speculateString(searchElementEdge, searchElementGPR);

        flushRegisters();

        callOperation(operationArrayIndexOfString, lengthGPR, TrustedImmPtr::weakPointer(m_graph, m_graph.globalObjectFor(node->origin.semantic)), storageGPR, searchElementGPR, indexGPR);
        m_jit.exceptionCheck();

        strictInt32Result(lengthGPR, node);
        return;
    }

    case UntypedUse: {
        // Fully generic value: call the operation matching the array shape.
        JSValueOperand searchElement(this, searchElementEdge);

        JSValueRegs searchElementRegs = searchElement.jsValueRegs();

        flushRegisters();
        switch (node->arrayMode().type()) {
        case Array::Double:
            callOperation(operationArrayIndexOfValueDouble, lengthGPR, TrustedImmPtr::weakPointer(m_graph, m_graph.globalObjectFor(node->origin.semantic)), storageGPR, searchElementRegs, indexGPR);
            break;
        case Array::Int32:
        case Array::Contiguous:
            callOperation(operationArrayIndexOfValueInt32OrContiguous, lengthGPR, TrustedImmPtr::weakPointer(m_graph, m_graph.globalObjectFor(node->origin.semantic)), storageGPR, searchElementRegs, indexGPR);
            break;
        default:
            RELEASE_ASSERT_NOT_REACHED();
            break;
        }
        m_jit.exceptionCheck();

        strictInt32Result(lengthGPR, node);
        return;
    }

    default:
        RELEASE_ASSERT_NOT_REACHED();
        return;
    }
}
9838
// ArrayPush: append one or more elements to a JSArray and produce the new
// length as a boxed Int32. Var-arg children: child 0 is the storage
// (butterfly), child 1 the array, children 2.. the elements to push.
//
// Each array shape has two strategies:
//  - single element: store directly if the vector has room, else call
//    operationArrayPush* from a slow path generator;
//  - multiple elements: if the vector has room, write the elements straight
//    into the butterfly; otherwise stage them in a VM scratch buffer and call
//    operationArrayPush*Multiple. The branchPtr against the scratch-buffer
//    address afterwards distinguishes which of the two happened at runtime.
void SpeculativeJIT::compileArrayPush(Node* node)
{
    ASSERT(node->arrayMode().isJSArray());

    Edge& storageEdge = m_jit.graph().varArgChild(node, 0);
    Edge& arrayEdge = m_jit.graph().varArgChild(node, 1);

    SpeculateCellOperand base(this, arrayEdge);
    GPRTemporary storageLength(this);

    GPRReg baseGPR = base.gpr();
    GPRReg storageLengthGPR = storageLength.gpr();

    StorageOperand storage(this, storageEdge);
    GPRReg storageGPR = storage.gpr();
    unsigned elementOffset = 2;
    unsigned elementCount = node->numChildren() - elementOffset;

#if USE(JSVALUE32_64)
    GPRTemporary tag(this);
    GPRReg tagGPR = tag.gpr();
    JSValueRegs resultRegs { tagGPR, storageLengthGPR };
#else
    JSValueRegs resultRegs { storageLengthGPR };
#endif

    // Computes &storage[index] + offset into bufferGPR.
    auto getStorageBufferAddress = [&] (GPRReg storageGPR, GPRReg indexGPR, int32_t offset, GPRReg bufferGPR) {
        static_assert(sizeof(JSValue) == 8 && 1 << 3 == 8, "This is strongly assumed in the code below.");
        m_jit.getEffectiveAddress(MacroAssembler::BaseIndex(storageGPR, indexGPR, MacroAssembler::TimesEight, offset), bufferGPR);
    };

    switch (node->arrayMode().type()) {
    case Array::Int32:
    case Array::Contiguous: {
        if (elementCount == 1) {
            Edge& element = m_jit.graph().varArgChild(node, elementOffset);
            if (node->arrayMode().type() == Array::Int32) {
                ASSERT(element.useKind() == Int32Use);
                speculateInt32(element);
            }
            JSValueOperand value(this, element, ManualOperandSpeculation);
            JSValueRegs valueRegs = value.jsValueRegs();

            // Fast path: room in the vector; store, bump publicLength.
            m_jit.load32(MacroAssembler::Address(storageGPR, Butterfly::offsetOfPublicLength()), storageLengthGPR);
            MacroAssembler::Jump slowPath = m_jit.branch32(MacroAssembler::AboveOrEqual, storageLengthGPR, MacroAssembler::Address(storageGPR, Butterfly::offsetOfVectorLength()));
            m_jit.storeValue(valueRegs, MacroAssembler::BaseIndex(storageGPR, storageLengthGPR, MacroAssembler::TimesEight));
            m_jit.add32(TrustedImm32(1), storageLengthGPR);
            m_jit.store32(storageLengthGPR, MacroAssembler::Address(storageGPR, Butterfly::offsetOfPublicLength()));
            m_jit.boxInt32(storageLengthGPR, resultRegs);

            addSlowPathGenerator(
                slowPathCall(slowPath, this, operationArrayPush, resultRegs, TrustedImmPtr::weakPointer(m_graph, m_graph.globalObjectFor(node->origin.semantic)), valueRegs, baseGPR));

            jsValueResult(resultRegs, node);
            return;
        }

        if (node->arrayMode().type() == Array::Int32) {
            for (unsigned elementIndex = 0; elementIndex < elementCount; ++elementIndex) {
                Edge element = m_jit.graph().varArgChild(node, elementIndex + elementOffset);
                ASSERT(element.useKind() == Int32Use);
                speculateInt32(element);
            }
        }

        GPRTemporary buffer(this);
        GPRReg bufferGPR = buffer.gpr();

        // bufferGPR = newLength = publicLength + elementCount; slow path if
        // that exceeds the vector length.
        m_jit.load32(MacroAssembler::Address(storageGPR, Butterfly::offsetOfPublicLength()), storageLengthGPR);
        m_jit.move(storageLengthGPR, bufferGPR);
        m_jit.add32(TrustedImm32(elementCount), bufferGPR);
        MacroAssembler::Jump slowPath = m_jit.branch32(MacroAssembler::Above, bufferGPR, MacroAssembler::Address(storageGPR, Butterfly::offsetOfVectorLength()));

        // Fast path: commit the new length and aim bufferGPR at the first
        // free slot of the butterfly.
        m_jit.store32(bufferGPR, MacroAssembler::Address(storageGPR, Butterfly::offsetOfPublicLength()));
        getStorageBufferAddress(storageGPR, storageLengthGPR, 0, bufferGPR);
        m_jit.add32(TrustedImm32(elementCount), storageLengthGPR);
        m_jit.boxInt32(storageLengthGPR, resultRegs);
        auto storageDone = m_jit.jump();

        // Slow path: aim bufferGPR at the scratch buffer instead.
        slowPath.link(&m_jit);

        size_t scratchSize = sizeof(EncodedJSValue) * elementCount;
        ScratchBuffer* scratchBuffer = vm().scratchBufferForSize(scratchSize);
        m_jit.move(TrustedImmPtr(static_cast<EncodedJSValue*>(scratchBuffer->dataBuffer())), bufferGPR);

        // Both paths: write the elements through bufferGPR.
        storageDone.link(&m_jit);
        for (unsigned elementIndex = 0; elementIndex < elementCount; ++elementIndex) {
            Edge& element = m_jit.graph().varArgChild(node, elementIndex + elementOffset);
            JSValueOperand value(this, element, ManualOperandSpeculation); // We did type checks above.
            JSValueRegs valueRegs = value.jsValueRegs();

            m_jit.storeValue(valueRegs, MacroAssembler::Address(bufferGPR, sizeof(EncodedJSValue) * elementIndex));
            value.use();
        }

        // If bufferGPR is not the scratch buffer we took the fast path and
        // are done; otherwise call out with the staged elements.
        MacroAssembler::Jump fastPath = m_jit.branchPtr(MacroAssembler::NotEqual, bufferGPR, TrustedImmPtr(static_cast<EncodedJSValue*>(scratchBuffer->dataBuffer())));

        addSlowPathGenerator(slowPathCall(m_jit.jump(), this, operationArrayPushMultiple, resultRegs, TrustedImmPtr::weakPointer(m_graph, m_graph.globalObjectFor(node->origin.semantic)), baseGPR, bufferGPR, TrustedImm32(elementCount)));

        base.use();
        storage.use();

        fastPath.link(&m_jit);
        jsValueResult(resultRegs, node, DataFormatJS, UseChildrenCalledExplicitly);
        return;
    }

    case Array::Double: {
        // Same structure as the Int32/Contiguous case, but with unboxed
        // doubles and the *Double operations.
        if (elementCount == 1) {
            Edge& element = m_jit.graph().varArgChild(node, elementOffset);
            speculate(node, element);
            SpeculateDoubleOperand value(this, element);
            FPRReg valueFPR = value.fpr();

            m_jit.load32(MacroAssembler::Address(storageGPR, Butterfly::offsetOfPublicLength()), storageLengthGPR);
            MacroAssembler::Jump slowPath = m_jit.branch32(MacroAssembler::AboveOrEqual, storageLengthGPR, MacroAssembler::Address(storageGPR, Butterfly::offsetOfVectorLength()));
            m_jit.storeDouble(valueFPR, MacroAssembler::BaseIndex(storageGPR, storageLengthGPR, MacroAssembler::TimesEight));
            m_jit.add32(TrustedImm32(1), storageLengthGPR);
            m_jit.store32(storageLengthGPR, MacroAssembler::Address(storageGPR, Butterfly::offsetOfPublicLength()));
            m_jit.boxInt32(storageLengthGPR, resultRegs);

            addSlowPathGenerator(
                slowPathCall(slowPath, this, operationArrayPushDouble, resultRegs, TrustedImmPtr::weakPointer(m_graph, m_graph.globalObjectFor(node->origin.semantic)), valueFPR, baseGPR));

            jsValueResult(resultRegs, node);
            return;
        }

        for (unsigned elementIndex = 0; elementIndex < elementCount; ++elementIndex) {
            Edge element = m_jit.graph().varArgChild(node, elementIndex + elementOffset);
            ASSERT(element.useKind() == DoubleRepRealUse);
            speculate(node, element);
        }

        GPRTemporary buffer(this);
        GPRReg bufferGPR = buffer.gpr();

        m_jit.load32(MacroAssembler::Address(storageGPR, Butterfly::offsetOfPublicLength()), storageLengthGPR);
        m_jit.move(storageLengthGPR, bufferGPR);
        m_jit.add32(TrustedImm32(elementCount), bufferGPR);
        MacroAssembler::Jump slowPath = m_jit.branch32(MacroAssembler::Above, bufferGPR, MacroAssembler::Address(storageGPR, Butterfly::offsetOfVectorLength()));

        m_jit.store32(bufferGPR, MacroAssembler::Address(storageGPR, Butterfly::offsetOfPublicLength()));
        getStorageBufferAddress(storageGPR, storageLengthGPR, 0, bufferGPR);
        m_jit.add32(TrustedImm32(elementCount), storageLengthGPR);
        m_jit.boxInt32(storageLengthGPR, resultRegs);
        auto storageDone = m_jit.jump();

        slowPath.link(&m_jit);

        size_t scratchSize = sizeof(double) * elementCount;
        ScratchBuffer* scratchBuffer = vm().scratchBufferForSize(scratchSize);
        m_jit.move(TrustedImmPtr(static_cast<EncodedJSValue*>(scratchBuffer->dataBuffer())), bufferGPR);

        storageDone.link(&m_jit);
        for (unsigned elementIndex = 0; elementIndex < elementCount; ++elementIndex) {
            Edge& element = m_jit.graph().varArgChild(node, elementIndex + elementOffset);
            SpeculateDoubleOperand value(this, element);
            FPRReg valueFPR = value.fpr();

            m_jit.storeDouble(valueFPR, MacroAssembler::Address(bufferGPR, sizeof(double) * elementIndex));
            value.use();
        }

        MacroAssembler::Jump fastPath = m_jit.branchPtr(MacroAssembler::NotEqual, bufferGPR, TrustedImmPtr(static_cast<EncodedJSValue*>(scratchBuffer->dataBuffer())));

        addSlowPathGenerator(slowPathCall(m_jit.jump(), this, operationArrayPushDoubleMultiple, resultRegs, TrustedImmPtr::weakPointer(m_graph, m_graph.globalObjectFor(node->origin.semantic)), baseGPR, bufferGPR, TrustedImm32(elementCount)));

        base.use();
        storage.use();

        fastPath.link(&m_jit);
        jsValueResult(resultRegs, node, DataFormatJS, UseChildrenCalledExplicitly);
        return;
    }

    case Array::ArrayStorage: {
        // This ensures that the result of ArrayPush is Int32 in AI.
        int32_t largestPositiveInt32Length = 0x7fffffff - elementCount;
        if (elementCount == 1) {
            Edge& element = m_jit.graph().varArgChild(node, elementOffset);
            JSValueOperand value(this, element);
            JSValueRegs valueRegs = value.jsValueRegs();

            m_jit.load32(MacroAssembler::Address(storageGPR, ArrayStorage::lengthOffset()), storageLengthGPR);

            // Refuse to handle bizarre lengths.
            speculationCheck(Uncountable, JSValueRegs(), nullptr, m_jit.branch32(MacroAssembler::Above, storageLengthGPR, TrustedImm32(largestPositiveInt32Length)));

            MacroAssembler::Jump slowPath = m_jit.branch32(MacroAssembler::AboveOrEqual, storageLengthGPR, MacroAssembler::Address(storageGPR, ArrayStorage::vectorLengthOffset()));

            m_jit.storeValue(valueRegs, MacroAssembler::BaseIndex(storageGPR, storageLengthGPR, MacroAssembler::TimesEight, ArrayStorage::vectorOffset()));

            // ArrayStorage tracks both the length and m_numValuesInVector.
            m_jit.add32(TrustedImm32(1), storageLengthGPR);
            m_jit.store32(storageLengthGPR, MacroAssembler::Address(storageGPR, ArrayStorage::lengthOffset()));
            m_jit.add32(TrustedImm32(1), MacroAssembler::Address(storageGPR, OBJECT_OFFSETOF(ArrayStorage, m_numValuesInVector)));
            m_jit.boxInt32(storageLengthGPR, resultRegs);

            addSlowPathGenerator(
                slowPathCall(slowPath, this, operationArrayPush, resultRegs, TrustedImmPtr::weakPointer(m_graph, m_graph.globalObjectFor(node->origin.semantic)), valueRegs, baseGPR));

            jsValueResult(resultRegs, node);
            return;
        }

        GPRTemporary buffer(this);
        GPRReg bufferGPR = buffer.gpr();

        m_jit.load32(MacroAssembler::Address(storageGPR, ArrayStorage::lengthOffset()), storageLengthGPR);

        // Refuse to handle bizarre lengths.
        speculationCheck(Uncountable, JSValueRegs(), nullptr, m_jit.branch32(MacroAssembler::Above, storageLengthGPR, TrustedImm32(largestPositiveInt32Length)));

        m_jit.move(storageLengthGPR, bufferGPR);
        m_jit.add32(TrustedImm32(elementCount), bufferGPR);
        MacroAssembler::Jump slowPath = m_jit.branch32(MacroAssembler::Above, bufferGPR, MacroAssembler::Address(storageGPR, ArrayStorage::vectorLengthOffset()));

        m_jit.store32(bufferGPR, MacroAssembler::Address(storageGPR, ArrayStorage::lengthOffset()));
        getStorageBufferAddress(storageGPR, storageLengthGPR, ArrayStorage::vectorOffset(), bufferGPR);
        m_jit.add32(TrustedImm32(elementCount), MacroAssembler::Address(storageGPR, OBJECT_OFFSETOF(ArrayStorage, m_numValuesInVector)));
        m_jit.add32(TrustedImm32(elementCount), storageLengthGPR);
        m_jit.boxInt32(storageLengthGPR, resultRegs);
        auto storageDone = m_jit.jump();

        slowPath.link(&m_jit);

        size_t scratchSize = sizeof(EncodedJSValue) * elementCount;
        ScratchBuffer* scratchBuffer = vm().scratchBufferForSize(scratchSize);
        m_jit.move(TrustedImmPtr(static_cast<EncodedJSValue*>(scratchBuffer->dataBuffer())), bufferGPR);

        storageDone.link(&m_jit);
        for (unsigned elementIndex = 0; elementIndex < elementCount; ++elementIndex) {
            Edge& element = m_jit.graph().varArgChild(node, elementIndex + elementOffset);
            JSValueOperand value(this, element);
            JSValueRegs valueRegs = value.jsValueRegs();

            m_jit.storeValue(valueRegs, MacroAssembler::Address(bufferGPR, sizeof(EncodedJSValue) * elementIndex));
            value.use();
        }

        MacroAssembler::Jump fastPath = m_jit.branchPtr(MacroAssembler::NotEqual, bufferGPR, TrustedImmPtr(static_cast<EncodedJSValue*>(scratchBuffer->dataBuffer())));

        addSlowPathGenerator(
            slowPathCall(m_jit.jump(), this, operationArrayPushMultiple, resultRegs, TrustedImmPtr::weakPointer(m_graph, m_graph.globalObjectFor(node->origin.semantic)), baseGPR, bufferGPR, TrustedImm32(elementCount)));

        base.use();
        storage.use();

        fastPath.link(&m_jit);
        jsValueResult(resultRegs, node, DataFormatJS, UseChildrenCalledExplicitly);
        return;
    }

    default:
        RELEASE_ASSERT_NOT_REACHED();
    }
}
10096
// Compiles the NotifyWrite node: fires the node's WatchpointSet unless it has
// already been invalidated. The fast path is a single byte compare against the
// set's state; only a not-yet-invalidated set takes the slow call.
void SpeculativeJIT::compileNotifyWrite(Node* node)
{
    WatchpointSet* set = node->watchpointSet();

    // Fast path: if the state byte already reads IsInvalidated, there is
    // nothing left to notify and we fall straight through.
    JITCompiler::Jump slowCase = m_jit.branch8(
        JITCompiler::NotEqual,
        JITCompiler::AbsoluteAddress(set->addressOfState()),
        TrustedImm32(IsInvalidated));

    // Slow path invalidates the set and fires its watchpoints. The call is
    // registered with CheckNotNeeded, so no exception check is emitted for it.
    addSlowPathGenerator(
        slowPathCall(slowCase, this, operationNotifyWrite, NeedToSpill, ExceptionCheckRequirement::CheckNotNeeded, NoResult, &vm(), set));

    noResult(node);
}
10111
// Compiles IsObject: produces an unblessed boolean that is true iff the input
// is a cell whose JSType is >= ObjectType. Non-cells (numbers, booleans,
// null/undefined encodings) yield false without touching the cell header.
void SpeculativeJIT::compileIsObject(Node* node)
{
    JSValueOperand value(this, node->child1());
    GPRTemporary result(this, Reuse, value, TagWord);

    JSValueRegs valueRegs = value.jsValueRegs();
    GPRReg resultGPR = result.gpr();

    JITCompiler::Jump isNotCell = m_jit.branchIfNotCell(valueRegs);

    // Object types are laid out at or above ObjectType in the JSType byte, so
    // a single unsigned compare of the typeInfoType byte decides the result.
    m_jit.compare8(JITCompiler::AboveOrEqual,
        JITCompiler::Address(valueRegs.payloadGPR(), JSCell::typeInfoTypeOffset()),
        TrustedImm32(ObjectType),
        resultGPR);
    JITCompiler::Jump done = m_jit.jump();

    // Non-cell inputs are never objects.
    isNotCell.link(&m_jit);
    m_jit.move(TrustedImm32(0), resultGPR);

    done.link(&m_jit);
    unblessedBooleanResult(resultGPR, node);
}
10134
// Compiles TypeOfIsObject, i.e. the `typeof x == "object"` predicate:
//   - null                          -> true
//   - any other non-cell            -> false
//   - function cells                -> false (typeof is "function")
//   - non-object cells (strings...) -> false
//   - plain objects                 -> true
//   - objects with MasqueradesAsUndefined or OverridesGetCallData set
//                                   -> slow path (operationTypeOfIsObject)
void SpeculativeJIT::compileTypeOfIsObject(Node* node)
{
    JSGlobalObject* globalObject = m_jit.graph().globalObjectFor(node->origin.semantic);

    JSValueOperand value(this, node->child1());
    JSValueRegs valueRegs = value.jsValueRegs();

    GPRTemporary result(this);
    GPRReg resultGPR = result.gpr();

    JITCompiler::Jump isCell = m_jit.branchIfCell(valueRegs);

    // Non-cell: only null answers true.
    JITCompiler::Jump isNull = m_jit.branchIfEqual(valueRegs, jsNull());
    JITCompiler::Jump isNonNullNonCell = m_jit.jump();

    isCell.link(&m_jit);
    JITCompiler::Jump isFunction = m_jit.branchIfFunction(valueRegs.payloadGPR());
    JITCompiler::Jump notObject = m_jit.branchIfNotObject(valueRegs.payloadGPR());

    // Objects that masquerade as undefined (or override GetCallData) need the
    // runtime to decide; everything else falls through to the "true" answer.
    JITCompiler::Jump slowPath = m_jit.branchTest8(
        JITCompiler::NonZero,
        JITCompiler::Address(valueRegs.payloadGPR(), JSCell::typeInfoFlagsOffset()),
        TrustedImm32(MasqueradesAsUndefined | OverridesGetCallData));

    // "true" path: null, or a plain object that fell through the flag test.
    isNull.link(&m_jit);
    m_jit.move(TrustedImm32(1), resultGPR);
    JITCompiler::Jump done = m_jit.jump();

    // "false" path: non-null non-cells, functions, and non-object cells.
    isNonNullNonCell.link(&m_jit);
    isFunction.link(&m_jit);
    notObject.link(&m_jit);
    m_jit.move(TrustedImm32(0), resultGPR);

    addSlowPathGenerator(
        slowPathCall(
            slowPath, this, operationTypeOfIsObject, resultGPR, globalObject,
            valueRegs.payloadGPR()));

    done.link(&m_jit);

    unblessedBooleanResult(resultGPR, node);
}
10177
// Compiles IsCallable-style predicates. The fast paths are:
//   - non-cell                       -> false
//   - function cell                  -> true
//   - non-object cell                -> false
//   - plain object without flags     -> false (falls through the flag test)
// Objects with MasqueradesAsUndefined or OverridesGetCallData defer to the
// caller-supplied slowPathOperation so the runtime can decide.
void SpeculativeJIT::compileIsCallable(Node* node, S_JITOperation_GC slowPathOperation)
{
    JSGlobalObject* globalObject = m_jit.graph().globalObjectFor(node->origin.semantic);

    JSValueOperand value(this, node->child1());
    JSValueRegs valueRegs = value.jsValueRegs();

    GPRTemporary result(this);
    GPRReg resultGPR = result.gpr();

    JITCompiler::Jump notCell = m_jit.branchIfNotCell(valueRegs);
    JITCompiler::Jump isFunction = m_jit.branchIfFunction(valueRegs.payloadGPR());
    JITCompiler::Jump notObject = m_jit.branchIfNotObject(valueRegs.payloadGPR());

    JITCompiler::Jump slowPath = m_jit.branchTest8(
        JITCompiler::NonZero,
        JITCompiler::Address(valueRegs.payloadGPR(), JSCell::typeInfoFlagsOffset()),
        TrustedImm32(MasqueradesAsUndefined | OverridesGetCallData));

    // "false" path: non-cells, non-object cells, and flag-free non-function
    // objects (the fall-through from the branchTest8 above).
    notCell.link(&m_jit);
    notObject.link(&m_jit);
    m_jit.move(TrustedImm32(0), resultGPR);
    JITCompiler::Jump done = m_jit.jump();

    isFunction.link(&m_jit);
    m_jit.move(TrustedImm32(1), resultGPR);

    addSlowPathGenerator(
        slowPathCall(
            slowPath, this, slowPathOperation, resultGPR, globalObject,
            valueRegs.payloadGPR()));

    done.link(&m_jit);

    unblessedBooleanResult(resultGPR, node);
}
10214
// Compiles IsConstructor as a straight runtime call: there is no fast path —
// all registers are flushed and operationIsConstructor computes the answer.
// NOTE(review): no exceptionCheck() is emitted after the call; presumably
// operationIsConstructor cannot throw — confirm against its definition.
void SpeculativeJIT::compileIsConstructor(Node* node)
{
    JSValueOperand input(this, node->child1());
    JSValueRegs inputRegs = input.jsValueRegs();
    flushRegisters();
    GPRFlushedCallResult result(this);
    GPRReg resultGPR = result.gpr();

    callOperation(operationIsConstructor, resultGPR, TrustedImmPtr::weakPointer(m_graph, m_graph.globalObjectFor(node->origin.semantic)), inputRegs);
    unblessedBooleanResult(resultGPR, node);
}
10226
// Compiles TypeOf: returns one of the interned small typeof strings. The type
// dispatch itself is emitted by JITCompiler::emitTypeOf via the two lambdas —
// the first loads the corresponding vm().smallStrings type string for each
// resolved TypeofType, the second captures the single jump that needs the
// runtime (operationTypeOfObject) to disambiguate.
void SpeculativeJIT::compileTypeOf(Node* node)
{
    JSGlobalObject* globalObject = m_jit.graph().globalObjectFor(node->origin.semantic);

    JSValueOperand value(this, node->child1());
    JSValueRegs valueRegs = value.jsValueRegs();

    GPRTemporary result(this);
    GPRReg resultGPR = result.gpr();

    JITCompiler::JumpList done;
    JITCompiler::Jump slowPath;
    m_jit.emitTypeOf(
        valueRegs, resultGPR,
        [&] (TypeofType type, bool fallsThrough) {
            // Materialize the interned string for this typeof answer; the last
            // case falls through instead of jumping to the merge point.
            m_jit.move(TrustedImmPtr::weakPointer(m_jit.graph(), vm().smallStrings.typeString(type)), resultGPR);
            if (!fallsThrough)
                done.append(m_jit.jump());
        },
        [&] (JITCompiler::Jump theSlowPath) {
            slowPath = theSlowPath;
        });
    done.link(&m_jit);

    addSlowPathGenerator(
        slowPathCall(
            slowPath, this, operationTypeOfObject, resultGPR, globalObject,
            valueRegs.payloadGPR()));

    cellResult(resultGPR, node);
}
10258
// Emits an OSR-exiting check (BadCache) that the cell's structure is a member
// of node->structureSet(). For a singleton set this is one weak-structure
// compare against the structureID field; for larger sets the structureID is
// loaded once and compared against each member, exiting only if none match.
// tempGPR may be InvalidGPRReg, in which case a temporary is allocated here.
void SpeculativeJIT::emitStructureCheck(Node* node, GPRReg cellGPR, GPRReg tempGPR)
{
    ASSERT(node->structureSet().size());

    if (node->structureSet().size() == 1) {
        // Single-structure fast case: compare directly against memory.
        speculationCheck(
            BadCache, JSValueSource::unboxedCell(cellGPR), nullptr,
            m_jit.branchWeakStructure(
                JITCompiler::NotEqual,
                JITCompiler::Address(cellGPR, JSCell::structureIDOffset()),
                node->structureSet()[0]));
    } else {
        std::unique_ptr<GPRTemporary> structure;
        GPRReg structureGPR;

        // Use the caller's scratch register if it provided one.
        if (tempGPR == InvalidGPRReg) {
            structure = makeUnique<GPRTemporary>(this);
            structureGPR = structure->gpr();
        } else
            structureGPR = tempGPR;

        m_jit.load32(JITCompiler::Address(cellGPR, JSCell::structureIDOffset()), structureGPR);

        JITCompiler::JumpList done;

        // Any match short-circuits to the merge point; only the last member
        // is checked with an exiting branch.
        for (size_t i = 0; i < node->structureSet().size() - 1; ++i) {
            done.append(
                m_jit.branchWeakStructure(JITCompiler::Equal, structureGPR, node->structureSet()[i]));
        }

        speculationCheck(
            BadCache, JSValueSource::unboxedCell(cellGPR), nullptr,
            m_jit.branchWeakStructure(
                JITCompiler::NotEqual, structureGPR, node->structureSet().last()));

        done.link(&m_jit);
    }
}
10297
// Compiles CheckIsConstant: OSR-exits (BadConstantValue) unless the child
// equals the node's constant. Cells are compared as weak pointers; non-cell
// constants are compared as encoded JSValues — one 64-bit compare on
// JSVALUE64, or separate tag and payload compares on 32-bit platforms.
void SpeculativeJIT::compileCheckIsConstant(Node* node)
{
    if (node->child1().useKind() == CellUse) {
        SpeculateCellOperand cell(this, node->child1());
        speculationCheck(BadConstantValue, JSValueSource::unboxedCell(cell.gpr()), node->child1(), m_jit.branchWeakPtr(JITCompiler::NotEqual, cell.gpr(), node->cellOperand()->cell()));
    } else {
        // In this branch the constant must not be a (non-empty) cell; cells go
        // through the CellUse path above.
        ASSERT(!node->constant()->value().isCell() || !node->constant()->value());
        JSValueOperand operand(this, node->child1());
        JSValueRegs regs = operand.jsValueRegs();

#if USE(JSVALUE64)
        speculationCheck(BadConstantValue, regs, node->child1(), m_jit.branch64(JITCompiler::NotEqual, regs.gpr(), TrustedImm64(JSValue::encode(node->constant()->value()))));
#else
        speculationCheck(BadConstantValue, regs, node->child1(), m_jit.branch32(JITCompiler::NotEqual, regs.tagGPR(), TrustedImm32(node->constant()->value().tag())));
        speculationCheck(BadConstantValue, regs, node->child1(), m_jit.branch32(JITCompiler::NotEqual, regs.payloadGPR(), TrustedImm32(node->constant()->value().payload())));
#endif
    }


    noResult(node);
}
10319
10320void SpeculativeJIT::compileCheckNotEmpty(Node* node)
10321{
10322 JSValueOperand operand(this, node->child1());
10323 JSValueRegs regs = operand.jsValueRegs();
10324 speculationCheck(TDZFailure, JSValueSource(), nullptr, m_jit.branchIfEmpty(regs));
10325 noResult(node);
10326}
10327
// Compiles CheckStructure. For (Known)CellUse the structure check is emitted
// directly. For CellOrOtherUse, "other" values (per the SpecCell | SpecOther
// type check) skip the structure check entirely; only cells are checked.
void SpeculativeJIT::compileCheckStructure(Node* node)
{
    switch (node->child1().useKind()) {
    case CellUse:
    case KnownCellUse: {
        SpeculateCellOperand cell(this, node->child1());
        emitStructureCheck(node, cell.gpr(), InvalidGPRReg);
        noResult(node);
        return;
    }

    case CellOrOtherUse: {
        JSValueOperand value(this, node->child1(), ManualOperandSpeculation);
        GPRTemporary temp(this);

        JSValueRegs valueRegs = value.jsValueRegs();
        GPRReg tempGPR = temp.gpr();

        JITCompiler::Jump cell = m_jit.branchIfCell(valueRegs);
        // Non-cell values must be "other" (null/undefined); anything else
        // fails the type check.
        DFG_TYPE_CHECK(
            valueRegs, node->child1(), SpecCell | SpecOther,
            m_jit.branchIfNotOther(valueRegs, tempGPR));
        JITCompiler::Jump done = m_jit.jump();
        cell.link(&m_jit);
        emitStructureCheck(node, valueRegs.payloadGPR(), tempGPR);
        done.link(&m_jit);
        noResult(node);
        return;
    }

    default:
        DFG_CRASH(m_jit.graph(), node, "Bad use kind");
        return;
    }
}
10363
// Compiles AllocatePropertyStorage for the initial out-of-line capacity.
// Takes a runtime call when no inline allocator exists for the size or when
// the previous structure could carry an indexing header (which the inline
// path does not initialize); otherwise allocates inline and zero-fills.
void SpeculativeJIT::compileAllocatePropertyStorage(Node* node)
{
    // This node only handles the first out-of-line allocation; growth is
    // ReallocatePropertyStorage's job.
    ASSERT(!node->transition()->previous->outOfLineCapacity());
    ASSERT(initialOutOfLineCapacity == node->transition()->next->outOfLineCapacity());

    size_t size = initialOutOfLineCapacity * sizeof(JSValue);

    Allocator allocator = vm().jsValueGigacageAuxiliarySpace.allocatorForNonVirtual(size, AllocatorForMode::AllocatorIfExists);

    if (!allocator || node->transition()->previous->couldHaveIndexingHeader()) {
        SpeculateCellOperand base(this, node->child1());

        GPRReg baseGPR = base.gpr();

        flushRegisters();

        GPRFlushedCallResult result(this);
        callOperation(operationAllocateComplexPropertyStorageWithInitialCapacity, result.gpr(), &vm(), baseGPR);
        m_jit.exceptionCheck();

        storageResult(result.gpr(), node);
        return;
    }

    GPRTemporary scratch1(this);
    GPRTemporary scratch2(this);
    GPRTemporary scratch3(this);

    GPRReg scratchGPR1 = scratch1.gpr();
    GPRReg scratchGPR2 = scratch2.gpr();
    GPRReg scratchGPR3 = scratch3.gpr();

    JITCompiler::JumpList slowPath;
    m_jit.emitAllocate(scratchGPR1, JITAllocator::constant(allocator), scratchGPR2, scratchGPR3, slowPath);
    // Advance past the allocation so the butterfly pointer sits after the
    // property storage; the stores below address it at negative offsets.
    m_jit.addPtr(JITCompiler::TrustedImm32(size + sizeof(IndexingHeader)), scratchGPR1);

    addSlowPathGenerator(
        slowPathCall(slowPath, this, operationAllocateSimplePropertyStorageWithInitialCapacity, scratchGPR1, &vm()));

    // Null out every new out-of-line property slot.
    for (ptrdiff_t offset = 0; offset < static_cast<ptrdiff_t>(size); offset += sizeof(void*))
        m_jit.storePtr(TrustedImmPtr(nullptr), JITCompiler::Address(scratchGPR1, -(offset + sizeof(JSValue) + sizeof(void*))));

    storageResult(scratchGPR1, node);
}
10408
// Compiles ReallocatePropertyStorage: grows the out-of-line property storage
// by outOfLineGrowthFactor. Falls back to a runtime call when no inline
// allocator exists or the previous structure could carry an indexing header;
// otherwise allocates inline, zero-fills the newly added slots, and copies
// the existing out-of-line properties over.
void SpeculativeJIT::compileReallocatePropertyStorage(Node* node)
{
    size_t oldSize = node->transition()->previous->outOfLineCapacity() * sizeof(JSValue);
    size_t newSize = oldSize * outOfLineGrowthFactor;
    ASSERT(newSize == node->transition()->next->outOfLineCapacity() * sizeof(JSValue));

    Allocator allocator = vm().jsValueGigacageAuxiliarySpace.allocatorForNonVirtual(newSize, AllocatorForMode::AllocatorIfExists);

    if (!allocator || node->transition()->previous->couldHaveIndexingHeader()) {
        SpeculateCellOperand base(this, node->child1());

        GPRReg baseGPR = base.gpr();

        flushRegisters();

        GPRFlushedCallResult result(this);
        callOperation(operationAllocateComplexPropertyStorage, result.gpr(), &vm(), baseGPR, newSize / sizeof(JSValue));
        m_jit.exceptionCheck();

        storageResult(result.gpr(), node);
        return;
    }

    StorageOperand oldStorage(this, node->child2());
    GPRTemporary scratch1(this);
    GPRTemporary scratch2(this);
    GPRTemporary scratch3(this);

    GPRReg oldStorageGPR = oldStorage.gpr();
    GPRReg scratchGPR1 = scratch1.gpr();
    GPRReg scratchGPR2 = scratch2.gpr();
    GPRReg scratchGPR3 = scratch3.gpr();

    JITCompiler::JumpList slowPath;
    m_jit.emitAllocate(scratchGPR1, JITAllocator::constant(allocator), scratchGPR2, scratchGPR3, slowPath);

    // Point past the allocation so out-of-line slots can be addressed at
    // negative offsets, matching the butterfly layout.
    m_jit.addPtr(JITCompiler::TrustedImm32(newSize + sizeof(IndexingHeader)), scratchGPR1);

    addSlowPathGenerator(
        slowPathCall(slowPath, this, operationAllocateSimplePropertyStorage, scratchGPR1, &vm(), newSize / sizeof(JSValue)));

    // Null-fill only the freshly added slots [oldSize, newSize).
    for (ptrdiff_t offset = oldSize; offset < static_cast<ptrdiff_t>(newSize); offset += sizeof(void*))
        m_jit.storePtr(TrustedImmPtr(nullptr), JITCompiler::Address(scratchGPR1, -(offset + sizeof(JSValue) + sizeof(void*))));

    // We have scratchGPR1 = new storage, scratchGPR2 = scratch
    for (ptrdiff_t offset = 0; offset < static_cast<ptrdiff_t>(oldSize); offset += sizeof(void*)) {
        m_jit.loadPtr(JITCompiler::Address(oldStorageGPR, -(offset + sizeof(JSValue) + sizeof(void*))), scratchGPR2);
        m_jit.storePtr(scratchGPR2, JITCompiler::Address(scratchGPR1, -(offset + sizeof(JSValue) + sizeof(void*))));
    }

    storageResult(scratchGPR1, node);
}
10461
10462void SpeculativeJIT::compileNukeStructureAndSetButterfly(Node* node)
10463{
10464 SpeculateCellOperand base(this, node->child1());
10465 StorageOperand storage(this, node->child2());
10466
10467 GPRReg baseGPR = base.gpr();
10468 GPRReg storageGPR = storage.gpr();
10469
10470 m_jit.nukeStructureAndStoreButterfly(vm(), storageGPR, baseGPR);
10471
10472 noResult(node);
10473}
10474
10475void SpeculativeJIT::compileGetButterfly(Node* node)
10476{
10477 SpeculateCellOperand base(this, node->child1());
10478 GPRTemporary result(this, Reuse, base);
10479
10480 GPRReg baseGPR = base.gpr();
10481 GPRReg resultGPR = result.gpr();
10482
10483 m_jit.loadPtr(JITCompiler::Address(baseGPR, JSObject::butterflyOffset()), resultGPR);
10484
10485 storageResult(resultGPR, node);
10486}
10487
10488static void allocateTemporaryRegistersForSnippet(SpeculativeJIT* jit, Vector<GPRTemporary>& gpHolders, Vector<FPRTemporary>& fpHolders, Vector<GPRReg>& gpScratch, Vector<FPRReg>& fpScratch, Snippet& snippet)
10489{
10490 for (unsigned i = 0; i < snippet.numGPScratchRegisters; ++i) {
10491 GPRTemporary temporary(jit);
10492 gpScratch.append(temporary.gpr());
10493 gpHolders.append(WTFMove(temporary));
10494 }
10495
10496 for (unsigned i = 0; i < snippet.numFPScratchRegisters; ++i) {
10497 FPRTemporary temporary(jit);
10498 fpScratch.append(temporary.fpr());
10499 fpHolders.append(WTFMove(temporary));
10500 }
10501}
10502
// Compiles CallDOM: a direct call to a DOMJIT function, bypassing the usual
// type checks. Child 0 is the |this| cell; the remaining children are
// speculated per the signature's argument classes (string/int32/boolean).
// The call itself is dispatched on argument count by casting the type-erased
// functionWithoutTypeCheck pointer to the matching operation signature.
void SpeculativeJIT::compileCallDOM(Node* node)
{
    const DOMJIT::Signature* signature = node->signature();

    // FIXME: We should have a way to call functions with the vector of registers.
    // https://bugs.webkit.org/show_bug.cgi?id=163099
    Vector<Variant<SpeculateCellOperand, SpeculateInt32Operand, SpeculateBooleanOperand>, JSC_DOMJIT_SIGNATURE_MAX_ARGUMENTS_INCLUDING_THIS> operands;
    Vector<GPRReg, JSC_DOMJIT_SIGNATURE_MAX_ARGUMENTS_INCLUDING_THIS> regs;

    // Each append* helper materializes a speculated operand, records its
    // register, and keeps the operand alive in |operands| until the call.
    auto appendCell = [&](Edge& edge) {
        SpeculateCellOperand operand(this, edge);
        regs.append(operand.gpr());
        operands.append(WTFMove(operand));
    };

    auto appendString = [&](Edge& edge) {
        SpeculateCellOperand operand(this, edge);
        GPRReg gpr = operand.gpr();
        regs.append(gpr);
        speculateString(edge, gpr);
        operands.append(WTFMove(operand));
    };

    auto appendInt32 = [&](Edge& edge) {
        SpeculateInt32Operand operand(this, edge);
        regs.append(operand.gpr());
        operands.append(WTFMove(operand));
    };

    auto appendBoolean = [&](Edge& edge) {
        SpeculateBooleanOperand operand(this, edge);
        regs.append(operand.gpr());
        operands.append(WTFMove(operand));
    };

    // Child 0 is |this| (always a cell); the rest follow signature->arguments.
    unsigned index = 0;
    m_jit.graph().doToChildren(node, [&](Edge edge) {
        if (!index)
            appendCell(edge);
        else {
            switch (signature->arguments[index - 1]) {
            case SpecString:
                appendString(edge);
                break;
            case SpecInt32Only:
                appendInt32(edge);
                break;
            case SpecBoolean:
                appendBoolean(edge);
                break;
            default:
                RELEASE_ASSERT_NOT_REACHED();
                break;
            }
        }
        ++index;
    });

    JSValueRegsTemporary result(this);
    JSValueRegs resultRegs = result.regs();

    flushRegisters();

    // FIXME: Revisit JSGlobalObject.
    // https://bugs.webkit.org/show_bug.cgi?id=203204
    auto function = CFunctionPtr(signature->functionWithoutTypeCheck);
    unsigned argumentCountIncludingThis = signature->argumentCount + 1;
    switch (argumentCountIncludingThis) {
    case 1:
        callOperation(reinterpret_cast<J_JITOperation_GP>(function.get()), extractResult(resultRegs), TrustedImmPtr::weakPointer(m_graph, m_graph.globalObjectFor(node->origin.semantic)), regs[0]);
        break;
    case 2:
        callOperation(reinterpret_cast<J_JITOperation_GPP>(function.get()), extractResult(resultRegs), TrustedImmPtr::weakPointer(m_graph, m_graph.globalObjectFor(node->origin.semantic)), regs[0], regs[1]);
        break;
    case 3:
        callOperation(reinterpret_cast<J_JITOperation_GPPP>(function.get()), extractResult(resultRegs), TrustedImmPtr::weakPointer(m_graph, m_graph.globalObjectFor(node->origin.semantic)), regs[0], regs[1], regs[2]);
        break;
    default:
        RELEASE_ASSERT_NOT_REACHED();
        break;
    }

    m_jit.exceptionCheck();
    jsValueResult(resultRegs, node);
}
10588
// Compiles CallDOMGetter. If the getter data has no snippet, a direct call to
// the custom accessor getter is emitted — either through the JIT cage's entry
// thunk (which receives the getter pointer as an argument) or, cage disabled,
// by retagging the CustomAccessorPtrTag pointer as an OperationPtrTag callee.
// With a snippet, the base (and optionally global object) values plus the
// snippet's requested scratch registers are gathered and the snippet
// generator emits the code inline.
void SpeculativeJIT::compileCallDOMGetter(Node* node)
{
    DOMJIT::CallDOMGetterSnippet* snippet = node->callDOMGetterData()->snippet;
    if (!snippet) {
        FunctionPtr<CustomAccessorPtrTag> getter = node->callDOMGetterData()->customAccessorGetter;
        SpeculateCellOperand base(this, node->child1());
        JSValueRegsTemporary result(this);

        JSValueRegs resultRegs = result.regs();
        GPRReg baseGPR = base.gpr();

        flushRegisters();
        // Under the JIT cage the getter address is passed as an extra argument
        // to a fixed entry point; otherwise the arguments omit it.
        if (Options::useJITCage())
            m_jit.setupArguments<J_JITOperation_GJIP>(TrustedImmPtr::weakPointer(m_graph, m_graph.globalObjectFor(node->origin.semantic)), CCallHelpers::CellValue(baseGPR), identifierUID(node->callDOMGetterData()->identifierNumber), getter.executableAddress());
        else
            m_jit.setupArguments<J_JITOperation_GJI>(TrustedImmPtr::weakPointer(m_graph, m_graph.globalObjectFor(node->origin.semantic)), CCallHelpers::CellValue(baseGPR), identifierUID(node->callDOMGetterData()->identifierNumber));

        m_jit.storePtr(GPRInfo::callFrameRegister, &vm().topCallFrame);
        m_jit.emitStoreCodeOrigin(m_currentNode->origin.semantic);
        if (Options::useJITCage())
            m_jit.appendCall(vmEntryCustomGetter);
        else {
            // Re-sign the getter pointer from CustomAccessorPtrTag to
            // OperationPtrTag so it can be called as a plain operation.
            FunctionPtr<OperationPtrTag> bypassedFunction = FunctionPtr<OperationPtrTag>(MacroAssemblerCodePtr<OperationPtrTag>(WTF::tagNativeCodePtrImpl<OperationPtrTag>(WTF::untagNativeCodePtrImpl<CustomAccessorPtrTag>(getter.executableAddress()))));
            m_jit.appendOperationCall(bypassedFunction);
        }
        m_jit.setupResults(resultRegs);

        m_jit.exceptionCheck();
        jsValueResult(resultRegs, node);
        return;
    }

    Vector<GPRReg> gpScratch;
    Vector<FPRReg> fpScratch;
    Vector<SnippetParams::Value> regs;

    // regs[0] is the result; regs[1] is the base (with its abstract value, so
    // the snippet can exploit a known constant).
    JSValueRegsTemporary result(this);
    regs.append(result.regs());

    Edge& baseEdge = node->child1();
    SpeculateCellOperand base(this, baseEdge);
    regs.append(SnippetParams::Value(base.gpr(), m_state.forNode(baseEdge).value()));

    std::optional<SpeculateCellOperand> globalObject;
    if (snippet->requireGlobalObject) {
        Edge& globalObjectEdge = node->child2();
        globalObject.emplace(this, globalObjectEdge);
        regs.append(SnippetParams::Value(globalObject->gpr(), m_state.forNode(globalObjectEdge).value()));
    }

    Vector<GPRTemporary> gpTempraries;
    Vector<FPRTemporary> fpTempraries;
    allocateTemporaryRegistersForSnippet(this, gpTempraries, fpTempraries, gpScratch, fpScratch, *snippet);
    SnippetParams params(this, WTFMove(regs), WTFMove(gpScratch), WTFMove(fpScratch));
    snippet->generator()->run(m_jit, params);
    jsValueResult(result.regs(), node);
}
10646
// Compiles CheckJSCast / CheckNotJSCast using one of three strategies, in
// order of preference:
//   1. If the ClassInfo declares an inherits-JSType range, a single type-range
//      branch decides the check.
//   2. If there is no checkSubClassSnippet, walk the ClassInfo parent chain of
//      the cell's structure, looking for the target ClassInfo.
//   3. Otherwise run the class's checkSubClassSnippet and use its failure
//      jumps.
// For CheckJSCast a failed match OSR-exits (BadType); for CheckNotJSCast a
// successful match exits instead.
void SpeculativeJIT::compileCheckJSCast(Node* node)
{
    DFG_ASSERT(m_jit.graph(), node, node->op() == CheckJSCast || node->op() == CheckNotJSCast);
    const ClassInfo* classInfo = node->classInfo();
    if (classInfo->inheritsJSTypeRange) {
        SpeculateCellOperand base(this, node->child1());
        GPRReg baseGPR = base.gpr();

        // Single type-range branch; the polarity flips for CheckNotJSCast.
        CCallHelpers::Jump checkFailed;
        if (node->op() == CheckJSCast)
            checkFailed = m_jit.branchIfNotType(baseGPR, classInfo->inheritsJSTypeRange.value());
        else
            checkFailed = m_jit.branchIfType(baseGPR, classInfo->inheritsJSTypeRange.value());
        speculationCheck(BadType, JSValueSource::unboxedCell(baseGPR), node->child1(), checkFailed);
        noResult(node);
        return;
    }

    if (!classInfo->checkSubClassSnippet) {
        SpeculateCellOperand base(this, node->child1());
        GPRTemporary other(this);
        GPRTemporary specified(this);

        GPRReg baseGPR = base.gpr();
        GPRReg otherGPR = other.gpr();
        GPRReg specifiedGPR = specified.gpr();

        // Walk the classInfo parent chain (terminated by a null parentClass)
        // looking for the target ClassInfo pointer.
        m_jit.emitLoadStructure(vm(), baseGPR, otherGPR, specifiedGPR);
        m_jit.loadPtr(CCallHelpers::Address(otherGPR, Structure::classInfoOffset()), otherGPR);
        m_jit.move(CCallHelpers::TrustedImmPtr(node->classInfo()), specifiedGPR);

        CCallHelpers::Label loop = m_jit.label();
        auto found = m_jit.branchPtr(CCallHelpers::Equal, otherGPR, specifiedGPR);
        m_jit.loadPtr(CCallHelpers::Address(otherGPR, ClassInfo::offsetOfParentClass()), otherGPR);
        m_jit.branchTestPtr(CCallHelpers::NonZero, otherGPR).linkTo(loop, &m_jit);
        if (node->op() == CheckJSCast) {
            // Fell off the chain without a match: the cast fails.
            speculationCheck(BadType, JSValueSource::unboxedCell(baseGPR), node->child1(), m_jit.jump());
            found.link(&m_jit);
        } else {
            // CheckNotJSCast: finding the class is the failure case.
            auto notFound = m_jit.jump();
            speculationCheck(BadType, JSValueSource::unboxedCell(baseGPR), node->child1(), found);
            notFound.link(&m_jit);
        }
        noResult(node);
        return;
    }

    Ref<Snippet> snippet = classInfo->checkSubClassSnippet();

    Vector<GPRReg> gpScratch;
    Vector<FPRReg> fpScratch;
    Vector<SnippetParams::Value> regs;

    SpeculateCellOperand base(this, node->child1());
    GPRReg baseGPR = base.gpr();
    regs.append(SnippetParams::Value(baseGPR, m_state.forNode(node->child1()).value()));

    Vector<GPRTemporary> gpTempraries;
    Vector<FPRTemporary> fpTempraries;
    allocateTemporaryRegistersForSnippet(this, gpTempraries, fpTempraries, gpScratch, fpScratch, snippet.get());

    SnippetParams params(this, WTFMove(regs), WTFMove(gpScratch), WTFMove(fpScratch));
    CCallHelpers::JumpList failureCases = snippet->generator()->run(m_jit, params);
    if (node->op() == CheckJSCast)
        speculationCheck(BadType, JSValueSource::unboxedCell(baseGPR), node->child1(), failureCases);
    else {
        // CheckNotJSCast: snippet success (fall-through) is the failure case.
        speculationCheck(BadType, JSValueSource::unboxedCell(baseGPR), node->child1(), m_jit.jump());
        failureCases.link(&m_jit);
    }
    noResult(node);
}
10718
10719GPRReg SpeculativeJIT::temporaryRegisterForPutByVal(GPRTemporary& temporary, ArrayMode arrayMode)
10720{
10721 if (!putByValWillNeedExtraRegister(arrayMode))
10722 return InvalidGPRReg;
10723
10724 GPRTemporary realTemporary(this);
10725 temporary.adopt(realTemporary);
10726 return temporary.gpr();
10727}
10728
// Compiles ToString, CallStringConstructor, and StringValueOf. The strategy
// depends on the child's use kind:
//   - NotCellUse: speculate not-cell, then call the runtime.
//   - UntypedUse: optional fast path reusing an input that is already a string
//     cell (only emitted when the prediction includes SpecString), otherwise a
//     runtime call. StringValueOf is only handled here (asserted at entry).
//   - Int32Use/Int52RepUse/DoubleRepUse: number-to-string with radix 10.
//   - StringObjectUse: unwrap the wrapper's internal string.
//   - StringOrStringObjectUse: branch on the type byte — pass strings through,
//     unwrap string objects, exit on anything else.
//   - CellUse: registers are flushed up front (a string input is considered
//     unlikely here) and non-strings call the *OnCell runtime variant.
void SpeculativeJIT::compileToStringOrCallStringConstructorOrStringValueOf(Node* node)
{
    ASSERT(node->op() != StringValueOf || node->child1().useKind() == UntypedUse);
    switch (node->child1().useKind()) {
    case NotCellUse: {
        JSValueOperand op1(this, node->child1(), ManualOperandSpeculation);
        JSValueRegs op1Regs = op1.jsValueRegs();

        GPRFlushedCallResult result(this);
        GPRReg resultGPR = result.gpr();

        speculateNotCell(node->child1(), op1Regs);

        flushRegisters();

        if (node->op() == ToString)
            callOperation(operationToString, resultGPR, TrustedImmPtr::weakPointer(m_graph, m_graph.globalObjectFor(node->origin.semantic)), op1Regs);
        else {
            ASSERT(node->op() == CallStringConstructor);
            callOperation(operationCallStringConstructor, resultGPR, TrustedImmPtr::weakPointer(m_graph, m_graph.globalObjectFor(node->origin.semantic)), op1Regs);
        }
        m_jit.exceptionCheck();
        cellResult(resultGPR, node);
        return;
    }

    case UntypedUse: {
        JSValueOperand op1(this, node->child1());
        JSValueRegs op1Regs = op1.jsValueRegs();
        GPRReg op1PayloadGPR = op1Regs.payloadGPR();

        GPRFlushedCallResult result(this);
        GPRReg resultGPR = result.gpr();

        flushRegisters();

        // Fast path: a value that is already a string cell is its own result.
        // Only worth emitting when profiling predicts strings.
        JITCompiler::Jump done;
        if (node->child1()->prediction() & SpecString) {
            JITCompiler::Jump slowPath1 = m_jit.branchIfNotCell(op1.jsValueRegs());
            JITCompiler::Jump slowPath2 = m_jit.branchIfNotString(op1PayloadGPR);
            m_jit.move(op1PayloadGPR, resultGPR);
            done = m_jit.jump();
            slowPath1.link(&m_jit);
            slowPath2.link(&m_jit);
        }
        if (node->op() == ToString)
            callOperation(operationToString, resultGPR, TrustedImmPtr::weakPointer(m_graph, m_graph.globalObjectFor(node->origin.semantic)), op1Regs);
        else if (node->op() == StringValueOf)
            callOperation(operationStringValueOf, resultGPR, TrustedImmPtr::weakPointer(m_graph, m_graph.globalObjectFor(node->origin.semantic)), op1Regs);
        else {
            ASSERT(node->op() == CallStringConstructor);
            callOperation(operationCallStringConstructor, resultGPR, TrustedImmPtr::weakPointer(m_graph, m_graph.globalObjectFor(node->origin.semantic)), op1Regs);
        }
        m_jit.exceptionCheck();
        if (done.isSet())
            done.link(&m_jit);
        cellResult(resultGPR, node);
        return;
    }

    case Int32Use:
    case Int52RepUse:
    case DoubleRepUse:
        // Numbers stringify via the radix-10 number-to-string path.
        compileNumberToStringWithValidRadixConstant(node, 10);
        return;

    default:
        break;
    }

    // Remaining cases all operate on a speculated cell.
    SpeculateCellOperand op1(this, node->child1());
    GPRReg op1GPR = op1.gpr();

    switch (node->child1().useKind()) {
    case StringObjectUse: {
        GPRTemporary result(this);
        GPRReg resultGPR = result.gpr();

        speculateStringObject(node->child1(), op1GPR);

        // A StringObject's result is its wrapped internal string.
        m_jit.loadPtr(JITCompiler::Address(op1GPR, JSWrapperObject::internalValueCellOffset()), resultGPR);
        cellResult(resultGPR, node);
        break;
    }

    case StringOrStringObjectUse: {
        GPRTemporary result(this);
        GPRReg resultGPR = result.gpr();

        // Branch on the raw type byte: StringType passes through unchanged,
        // StringObjectType unwraps, anything else OSR-exits (BadType).
        m_jit.load8(JITCompiler::Address(op1GPR, JSCell::typeInfoTypeOffset()), resultGPR);
        JITCompiler::Jump isString = m_jit.branch32(JITCompiler::Equal, resultGPR, TrustedImm32(StringType));

        speculationCheck(BadType, JSValueSource::unboxedCell(op1GPR), node->child1().node(), m_jit.branch32(JITCompiler::NotEqual, resultGPR, TrustedImm32(StringObjectType)));
        m_jit.loadPtr(JITCompiler::Address(op1GPR, JSWrapperObject::internalValueCellOffset()), resultGPR);
        JITCompiler::Jump done = m_jit.jump();

        isString.link(&m_jit);
        m_jit.move(op1GPR, resultGPR);
        done.link(&m_jit);

        // Record the proven type in the abstract interpreter's state.
        m_interpreter.filter(node->child1(), SpecString | SpecStringObject);

        cellResult(resultGPR, node);
        break;
    }

    case CellUse: {
        GPRFlushedCallResult result(this);
        GPRReg resultGPR = result.gpr();

        // We flush registers instead of silent spill/fill because in this mode we
        // believe that most likely the input is not a string, and we need to take
        // slow path.
        flushRegisters();
        JITCompiler::Jump done;
        if (node->child1()->prediction() & SpecString) {
            JITCompiler::Jump needCall = m_jit.branchIfNotString(op1GPR);
            m_jit.move(op1GPR, resultGPR);
            done = m_jit.jump();
            needCall.link(&m_jit);
        }
        if (node->op() == ToString)
            callOperation(operationToStringOnCell, resultGPR, TrustedImmPtr::weakPointer(m_graph, m_graph.globalObjectFor(node->origin.semantic)), op1GPR);
        else {
            ASSERT(node->op() == CallStringConstructor);
            callOperation(operationCallStringConstructorOnCell, resultGPR, TrustedImmPtr::weakPointer(m_graph, m_graph.globalObjectFor(node->origin.semantic)), op1GPR);
        }
        m_jit.exceptionCheck();
        if (done.isSet())
            done.link(&m_jit);
        cellResult(resultGPR, node);
        break;
    }

    default:
        RELEASE_ASSERT_NOT_REACHED();
    }
}
10867
// Emits code that loads a JSFunction's ExecutableBase into resultGPR. The
// executableOrRareData field either holds the executable directly, or holds a
// FunctionRareData pointer tagged with JSFunction::rareDataTag; in the tagged
// case the executable is loaded out of the rare data (the load offset is
// pre-adjusted by -rareDataTag so the tag never needs stripping).
static void getExecutable(JITCompiler& jit, GPRReg functionGPR, GPRReg resultGPR)
{
    jit.loadPtr(JITCompiler::Address(functionGPR, JSFunction::offsetOfExecutableOrRareData()), resultGPR);
    auto hasExecutable = jit.branchTestPtr(CCallHelpers::Zero, resultGPR, CCallHelpers::TrustedImm32(JSFunction::rareDataTag));
    jit.loadPtr(CCallHelpers::Address(resultGPR, FunctionRareData::offsetOfExecutable() - JSFunction::rareDataTag), resultGPR);
    hasExecutable.link(&jit);
}
10875
// Compiles FunctionToString: fast-paths Function.prototype.toString by
// returning the cached "asString" from the executable (FunctionExecutable
// rare data, or NativeExecutable). Falls back to operationFunctionToString
// for bound functions, executables with no rare data, or a null cached
// string.
void SpeculativeJIT::compileFunctionToString(Node* node)
{
    SpeculateCellOperand function(this, node->child1());
    GPRTemporary executable(this);
    GPRTemporary result(this);
    JITCompiler::JumpList slowCases;

    speculateFunction(node->child1(), function.gpr());

    // Bound functions have no cached source string — send them to the slow
    // path. The exact classInfo compare is sound because JSBoundFunction is
    // final (asserted below).
    m_jit.emitLoadStructure(vm(), function.gpr(), result.gpr(), executable.gpr());
    m_jit.loadPtr(JITCompiler::Address(result.gpr(), Structure::classInfoOffset()), result.gpr());
    static_assert(std::is_final_v<JSBoundFunction>, "We don't handle subclasses when comparing classInfo below");
    slowCases.append(m_jit.branchPtr(CCallHelpers::Equal, result.gpr(), TrustedImmPtr(JSBoundFunction::info())));

    getExecutable(m_jit, function.gpr(), executable.gpr());
    JITCompiler::Jump isNativeExecutable = m_jit.branch8(JITCompiler::Equal, JITCompiler::Address(executable.gpr(), JSCell::typeInfoTypeOffset()), TrustedImm32(NativeExecutableType));

    // FunctionExecutable keeps its cached string in its rare data; missing
    // rare data means the slow path must compute it.
    m_jit.loadPtr(MacroAssembler::Address(executable.gpr(), FunctionExecutable::offsetOfRareData()), result.gpr());
    slowCases.append(m_jit.branchTestPtr(MacroAssembler::Zero, result.gpr()));
    m_jit.loadPtr(MacroAssembler::Address(result.gpr(), FunctionExecutable::offsetOfAsStringInRareData()), result.gpr());
    JITCompiler::Jump continuation = m_jit.jump();

    isNativeExecutable.link(&m_jit);
    m_jit.loadPtr(MacroAssembler::Address(executable.gpr(), NativeExecutable::offsetOfAsString()), result.gpr());

    // A null cached string also needs the runtime to build it.
    continuation.link(&m_jit);
    slowCases.append(m_jit.branchTestPtr(MacroAssembler::Zero, result.gpr()));

    addSlowPathGenerator(slowPathCall(slowCases, this, operationFunctionToString, result.gpr(), TrustedImmPtr::weakPointer(m_graph, m_graph.globalObjectFor(node->origin.semantic)), function.gpr()));

    cellResult(result.gpr(), node);
}
10908
10909void SpeculativeJIT::compileNumberToStringWithValidRadixConstant(Node* node)
10910{
10911 compileNumberToStringWithValidRadixConstant(node, node->validRadixConstant());
10912}
10913
// Compiles Number#toString(radix) when the radix is a compile-time constant
// already known to be valid (so the callee skips radix range checks). Picks
// the runtime operation matching child1's numeric representation.
void SpeculativeJIT::compileNumberToStringWithValidRadixConstant(Node* node, int32_t radix)
{
    // Shared tail: flush register state, call (globalObject, value, radix),
    // check for an exception, and forward the resulting string cell.
    auto callToString = [&] (auto operation, GPRReg resultGPR, auto valueReg) {
        flushRegisters();
        callOperation(operation, resultGPR, TrustedImmPtr::weakPointer(m_graph, m_graph.globalObjectFor(node->origin.semantic)), valueReg, TrustedImm32(radix));
        m_jit.exceptionCheck();
        cellResult(resultGPR, node);
    };

    switch (node->child1().useKind()) {
    case Int32Use: {
        SpeculateStrictInt32Operand value(this, node->child1());
        GPRFlushedCallResult result(this);
        callToString(operationInt32ToStringWithValidRadix, result.gpr(), value.gpr());
        break;
    }

#if USE(JSVALUE64)
    case Int52RepUse: {
        SpeculateStrictInt52Operand value(this, node->child1());
        GPRFlushedCallResult result(this);
        callToString(operationInt52ToStringWithValidRadix, result.gpr(), value.gpr());
        break;
    }
#endif

    case DoubleRepUse: {
        SpeculateDoubleOperand value(this, node->child1());
        GPRFlushedCallResult result(this);
        callToString(operationDoubleToStringWithValidRadix, result.gpr(), value.fpr());
        break;
    }

    default:
        RELEASE_ASSERT_NOT_REACHED();
    }
}
10951
// Compiles Number#toString(radix) with a runtime radix. If the radix happens
// to be a constant in the valid 2..36 range we can still use the
// "ValidRadix" operations that skip range checking; otherwise the generic
// operations validate (and may throw on) the radix.
void SpeculativeJIT::compileNumberToStringWithRadix(Node* node)
{
    bool validRadixIsGuaranteed = false;
    if (node->child2()->isInt32Constant()) {
        int32_t radix = node->child2()->asInt32();
        if (radix >= 2 && radix <= 36)
            validRadixIsGuaranteed = true;
    }

    // Shared tail: flush register state, call (globalObject, value, radix),
    // check for an exception, and forward the resulting string cell.
    auto callToString = [&] (auto operation, GPRReg resultGPR, auto valueReg, GPRReg radixGPR) {
        flushRegisters();
        callOperation(operation, resultGPR, TrustedImmPtr::weakPointer(m_graph, m_graph.globalObjectFor(node->origin.semantic)), valueReg, radixGPR);
        m_jit.exceptionCheck();
        cellResult(resultGPR, node);
    };

    switch (node->child1().useKind()) {
    case Int32Use: {
        SpeculateStrictInt32Operand value(this, node->child1());
        SpeculateStrictInt32Operand radix(this, node->child2());
        GPRFlushedCallResult result(this);
        callToString(validRadixIsGuaranteed ? operationInt32ToStringWithValidRadix : operationInt32ToString, result.gpr(), value.gpr(), radix.gpr());
        break;
    }

#if USE(JSVALUE64)
    case Int52RepUse: {
        SpeculateStrictInt52Operand value(this, node->child1());
        SpeculateStrictInt32Operand radix(this, node->child2());
        GPRFlushedCallResult result(this);
        callToString(validRadixIsGuaranteed ? operationInt52ToStringWithValidRadix : operationInt52ToString, result.gpr(), value.gpr(), radix.gpr());
        break;
    }
#endif

    case DoubleRepUse: {
        SpeculateDoubleOperand value(this, node->child1());
        SpeculateStrictInt32Operand radix(this, node->child2());
        GPRFlushedCallResult result(this);
        callToString(validRadixIsGuaranteed ? operationDoubleToStringWithValidRadix : operationDoubleToString, result.gpr(), value.fpr(), radix.gpr());
        break;
    }

    default:
        RELEASE_ASSERT_NOT_REACHED();
    }
}
10999
// Inline-allocates a StringObject wrapping the string cell in child1,
// falling back to operationNewStringObject when the inline allocator fails.
void SpeculativeJIT::compileNewStringObject(Node* node)
{
    SpeculateCellOperand operand(this, node->child1());

    GPRTemporary result(this);
    GPRTemporary scratch1(this);
    GPRTemporary scratch2(this);

    GPRReg operandGPR = operand.gpr();
    GPRReg resultGPR = result.gpr();
    GPRReg scratch1GPR = scratch1.gpr();
    GPRReg scratch2GPR = scratch2.gpr();

    JITCompiler::JumpList slowPath;

    // StringObjects have no out-of-line storage, so the butterfly is null.
    auto butterfly = TrustedImmPtr(nullptr);
    emitAllocateJSObject<StringObject>(
        resultGPR, TrustedImmPtr(node->structure()), butterfly, scratch1GPR, scratch2GPR,
        slowPath);

    // Store the wrapped string into the internal value slot. On 32-bit we
    // must write the cell tag and the payload separately.
#if USE(JSVALUE64)
    m_jit.store64(
        operandGPR, JITCompiler::Address(resultGPR, JSWrapperObject::internalValueOffset()));
#else
    m_jit.store32(
        TrustedImm32(JSValue::CellTag),
        JITCompiler::Address(resultGPR, JSWrapperObject::internalValueOffset() + OBJECT_OFFSETOF(JSValue, u.asBits.tag)));
    m_jit.store32(
        operandGPR,
        JITCompiler::Address(resultGPR, JSWrapperObject::internalValueOffset() + OBJECT_OFFSETOF(JSValue, u.asBits.payload)));
#endif

    // Publish the fully-initialized object to the concurrent GC.
    m_jit.mutatorFence(vm());

    addSlowPathGenerator(slowPathCall(
        slowPath, this, operationNewStringObject, resultGPR, &vm(), operandGPR, node->structure()));

    cellResult(resultGPR, node);
}
11039
// Compiles NewSymbol. With no child this is Symbol() with no description;
// otherwise child1 is a known string used as the description. Both cases are
// plain runtime calls.
void SpeculativeJIT::compileNewSymbol(Node* node)
{
    if (!node->child1()) {
        flushRegisters();
        GPRFlushedCallResult result(this);
        GPRReg resultGPR = result.gpr();
        callOperation(operationNewSymbol, resultGPR, &vm());
        m_jit.exceptionCheck();
        cellResult(resultGPR, node);
        return;
    }


    ASSERT(node->child1().useKind() == KnownStringUse);
    SpeculateCellOperand operand(this, node->child1());

    GPRReg stringGPR = operand.gpr();

    flushRegisters();
    GPRFlushedCallResult result(this);
    GPRReg resultGPR = result.gpr();
    callOperation(operationNewSymbolWithDescription, resultGPR, TrustedImmPtr::weakPointer(m_graph, m_graph.globalObjectFor(node->origin.semantic)), stringGPR);
    m_jit.exceptionCheck();
    cellResult(resultGPR, node);
}
11065
// Inline fast path for allocating a typed array of a given (Int32) size:
// allocates the backing store from the primitive gigacage, zero-fills it,
// then allocates the view object and wires up its vector/length/mode fields.
// Any failure (size too large, allocator exhausted) defers to the runtime.
void SpeculativeJIT::compileNewTypedArrayWithSize(Node* node)
{
    JSGlobalObject* globalObject = m_jit.graph().globalObjectFor(node->origin.semantic);
    auto typedArrayType = node->typedArrayType();
    RegisteredStructure structure = m_jit.graph().registerStructure(globalObject->typedArrayStructureConcurrently(typedArrayType));
    RELEASE_ASSERT(structure.get());

    SpeculateInt32Operand size(this, node->child1());
    GPRReg sizeGPR = size.gpr();

    GPRTemporary result(this);
    GPRTemporary storage(this);
    GPRTemporary scratch(this);
    GPRTemporary scratch2(this);
    GPRReg resultGPR = result.gpr();
    GPRReg storageGPR = storage.gpr();
    GPRReg scratchGPR = scratch.gpr();
    GPRReg scratchGPR2 = scratch2.gpr();

    JITCompiler::JumpList slowCases;

    // Null storage tells the slow path that no fast-path allocation happened.
    m_jit.move(TrustedImmPtr(nullptr), storageGPR);

    // Unsigned compare also sends negative sizes to the slow path.
    slowCases.append(m_jit.branch32(
        MacroAssembler::Above, sizeGPR, TrustedImm32(JSArrayBufferView::fastSizeLimit)));

    // Byte size = size << logElementSize, rounded up to an 8-byte multiple
    // for element sizes below 8 so allocation stays 8-byte aligned.
    m_jit.move(sizeGPR, scratchGPR);
    m_jit.lshift32(TrustedImm32(logElementSize(typedArrayType)), scratchGPR);
    if (elementSize(typedArrayType) < 8) {
        m_jit.add32(TrustedImm32(7), scratchGPR);
        m_jit.and32(TrustedImm32(~7), scratchGPR);
    }
    m_jit.emitAllocateVariableSized(
        storageGPR, vm().primitiveGigacageAuxiliarySpace, scratchGPR, scratchGPR,
        scratchGPR2, slowCases);

    // Zero-fill the storage in 4-byte chunks; skip entirely for size == 0.
    MacroAssembler::Jump done = m_jit.branchTest32(MacroAssembler::Zero, sizeGPR);
    m_jit.move(sizeGPR, scratchGPR);
    // Convert the element count into a count of 4-byte words, rounding up
    // for sub-4-byte element types.
    if (elementSize(typedArrayType) != 4) {
        if (elementSize(typedArrayType) > 4)
            m_jit.lshift32(TrustedImm32(logElementSize(typedArrayType) - 2), scratchGPR);
        else {
            if (elementSize(typedArrayType) > 1)
                m_jit.lshift32(TrustedImm32(logElementSize(typedArrayType)), scratchGPR);
            m_jit.add32(TrustedImm32(3), scratchGPR);
            m_jit.urshift32(TrustedImm32(2), scratchGPR);
        }
    }
    MacroAssembler::Label loop = m_jit.label();
    m_jit.sub32(TrustedImm32(1), scratchGPR);
    m_jit.store32(
        TrustedImm32(0),
        MacroAssembler::BaseIndex(storageGPR, scratchGPR, MacroAssembler::TimesFour));
    m_jit.branchTest32(MacroAssembler::NonZero, scratchGPR).linkTo(loop, &m_jit);
    done.link(&m_jit);
#if CPU(ARM64E)
    // sizeGPR is still boxed as a number and there is no 32-bit variant of the PAC instructions.
    m_jit.zeroExtend32ToWord(sizeGPR, scratchGPR);
    m_jit.tagArrayPtr(scratchGPR, storageGPR);
#endif

    // Typed array views have no butterfly; dispatch on the concrete view
    // class so the allocator uses the right size/space.
    auto butterfly = TrustedImmPtr(nullptr);
    switch (typedArrayType) {
#define TYPED_ARRAY_TYPE_CASE(name) \
    case Type ## name: \
        emitAllocateJSObject<JS##name##Array>(resultGPR, TrustedImmPtr(structure), butterfly, scratchGPR, scratchGPR2, slowCases); \
        break;
    FOR_EACH_TYPED_ARRAY_TYPE_EXCLUDING_DATA_VIEW(TYPED_ARRAY_TYPE_CASE)
#undef TYPED_ARRAY_TYPE_CASE
    case TypeDataView:
        emitAllocateJSObject<JSDataView>(resultGPR, TrustedImmPtr(structure), butterfly, scratchGPR, scratchGPR2, slowCases);
        break;
    default:
        RELEASE_ASSERT_NOT_REACHED();
        break;
    }

    m_jit.storePtr(
        storageGPR,
        MacroAssembler::Address(resultGPR, JSArrayBufferView::offsetOfVector()));
    m_jit.store32(
        sizeGPR,
        MacroAssembler::Address(resultGPR, JSArrayBufferView::offsetOfLength()));
    m_jit.store32(
        TrustedImm32(FastTypedArray),
        MacroAssembler::Address(resultGPR, JSArrayBufferView::offsetOfMode()));

    // Publish the fully-initialized view to the concurrent GC.
    m_jit.mutatorFence(vm());

    addSlowPathGenerator(slowPathCall(
        slowCases, this, operationNewTypedArrayWithSizeForType(typedArrayType),
        resultGPR, TrustedImmPtr::weakPointer(m_graph, m_graph.globalObjectFor(node->origin.semantic)), structure, sizeGPR, storageGPR));

    cellResult(resultGPR, node);
}
11161
// Inline-allocates a RegExpObject for the constant RegExp operand, storing
// the given lastIndex (child1). Falls back to operationNewRegexpWithLastIndex
// if the inline allocator fails.
void SpeculativeJIT::compileNewRegexp(Node* node)
{
    RegExp* regexp = node->castOperand<RegExp*>();

    GPRTemporary result(this);
    GPRTemporary scratch1(this);
    GPRTemporary scratch2(this);
    JSValueOperand lastIndex(this, node->child1());

    GPRReg resultGPR = result.gpr();
    GPRReg scratch1GPR = scratch1.gpr();
    GPRReg scratch2GPR = scratch2.gpr();
    JSValueRegs lastIndexRegs = lastIndex.jsValueRegs();

    JITCompiler::JumpList slowPath;

    auto structure = m_jit.graph().registerStructure(m_jit.graph().globalObjectFor(node->origin.semantic)->regExpStructure());
    // RegExpObjects have no out-of-line storage, so the butterfly is null.
    auto butterfly = TrustedImmPtr(nullptr);
    emitAllocateJSObject<RegExpObject>(resultGPR, TrustedImmPtr(structure), butterfly, scratch1GPR, scratch2GPR, slowPath);

    // Storing the raw RegExp* leaves the lastIndexIsNotWritable flag bit
    // clear in the combined field.
    m_jit.storePtr(
        TrustedImmPtr(node->cellOperand()),
        CCallHelpers::Address(resultGPR, RegExpObject::offsetOfRegExpAndLastIndexIsNotWritableFlag()));
    m_jit.storeValue(lastIndexRegs, CCallHelpers::Address(resultGPR, RegExpObject::offsetOfLastIndex()));
    // Publish the fully-initialized object to the concurrent GC.
    m_jit.mutatorFence(vm());

    addSlowPathGenerator(slowPathCall(slowPath, this, operationNewRegexpWithLastIndex, resultGPR, TrustedImmPtr::weakPointer(m_graph, m_graph.globalObjectFor(node->origin.semantic)), regexp, lastIndexRegs));

    cellResult(resultGPR, node);
}
11192
// Emits an OSR-exit check that the cell in cellGPR has the given JSType,
// without narrowing the abstract interpreter's proven type for the edge
// (unlike speculateCellType below, which filters).
void SpeculativeJIT::speculateCellTypeWithoutTypeFiltering(
    Edge edge, GPRReg cellGPR, JSType jsType)
{
    speculationCheck(
        BadType, JSValueSource::unboxedCell(cellGPR), edge,
        m_jit.branchIfNotType(cellGPR, jsType));
}
11200
// Emits an OSR-exit check that the cell in cellGPR has the given JSType and,
// on success, filters the edge's proven type down to specType.
void SpeculativeJIT::speculateCellType(
    Edge edge, GPRReg cellGPR, SpeculatedType specType, JSType jsType)
{
    DFG_TYPE_CHECK(
        JSValueSource::unboxedCell(cellGPR), edge, specType,
        m_jit.branchIfNotType(cellGPR, jsType));
}
11208
11209void SpeculativeJIT::speculateInt32(Edge edge)
11210{
11211 if (!needsTypeCheck(edge, SpecInt32Only))
11212 return;
11213
11214 (SpeculateInt32Operand(this, edge)).gpr();
11215}
11216
// Speculates that the edge is a bytecode number (Int32 or double).
void SpeculativeJIT::speculateNumber(Edge edge)
{
    if (!needsTypeCheck(edge, SpecBytecodeNumber))
        return;

    JSValueOperand value(this, edge, ManualOperandSpeculation);
#if USE(JSVALUE64)
    // On 64-bit, numbers are recognizable from the boxed value alone.
    GPRReg gpr = value.gpr();
    typeCheck(
        JSValueRegs(gpr), edge, SpecBytecodeNumber,
        m_jit.branchIfNotNumber(gpr));
#else
    // On 32-bit, check the tag: Int32 passes immediately; anything else must
    // have a double tag (i.e. tag below LowestTag).
    IGNORE_WARNINGS_BEGIN("enum-compare")
    static_assert(JSValue::Int32Tag >= JSValue::LowestTag, "Int32Tag is included in >= JSValue::LowestTag range.");
    IGNORE_WARNINGS_END
    GPRReg tagGPR = value.tagGPR();
    DFG_TYPE_CHECK(
        value.jsValueRegs(), edge, ~SpecInt32Only,
        m_jit.branchIfInt32(tagGPR));
    DFG_TYPE_CHECK(
        value.jsValueRegs(), edge, SpecBytecodeNumber,
        m_jit.branch32(MacroAssembler::AboveOrEqual, tagGPR, TrustedImm32(JSValue::LowestTag)));
#endif
}
11241
// Speculates that the edge is a real (non-NaN) bytecode number. Unboxes the
// value as a double; if it unboxes to NaN, the value is only acceptable when
// it is actually an Int32 (a non-number boxing pattern also reads as NaN).
void SpeculativeJIT::speculateRealNumber(Edge edge)
{
    if (!needsTypeCheck(edge, SpecBytecodeRealNumber))
        return;

    JSValueOperand op1(this, edge, ManualOperandSpeculation);
    FPRTemporary result(this);

    JSValueRegs op1Regs = op1.jsValueRegs();
    FPRReg resultFPR = result.fpr();

#if USE(JSVALUE64)
    GPRTemporary temp(this);
    GPRReg tempGPR = temp.gpr();
    // "WithoutAssertions" because the input may not be a number at all yet.
    m_jit.unboxDoubleWithoutAssertions(op1Regs.gpr(), tempGPR, resultFPR);
#else
    FPRTemporary temp(this);
    FPRReg tempFPR = temp.fpr();
    unboxDouble(op1Regs.tagGPR(), op1Regs.payloadGPR(), resultFPR, tempFPR);
#endif

    // A non-NaN unboxed double is definitely a real number.
    JITCompiler::Jump done = m_jit.branchIfNotNaN(resultFPR);

    // Otherwise the value is only fine if it is an Int32.
    typeCheck(op1Regs, edge, SpecBytecodeRealNumber, m_jit.branchIfNotInt32(op1Regs));

    done.link(&m_jit);
}
11269
// Speculates that a double-rep edge holds a real (non-NaN) double.
void SpeculativeJIT::speculateDoubleRepReal(Edge edge)
{
    if (!needsTypeCheck(edge, SpecDoubleReal))
        return;

    SpeculateDoubleOperand operand(this, edge);
    FPRReg fpr = operand.fpr();
    // No JSValueRegs: the value only exists as an FPR double here.
    typeCheck(
        JSValueRegs(), edge, SpecDoubleReal,
        m_jit.branchIfNaN(fpr));
}
11281
11282void SpeculativeJIT::speculateBoolean(Edge edge)
11283{
11284 if (!needsTypeCheck(edge, SpecBoolean))
11285 return;
11286
11287 (SpeculateBooleanOperand(this, edge)).gpr();
11288}
11289
11290void SpeculativeJIT::speculateCell(Edge edge)
11291{
11292 if (!needsTypeCheck(edge, SpecCellCheck))
11293 return;
11294
11295 (SpeculateCellOperand(this, edge)).gpr();
11296}
11297
// Speculates that the edge is either a cell or "other" (undefined/null).
// Cells pass straight through; non-cells must pass the "other" check.
void SpeculativeJIT::speculateCellOrOther(Edge edge)
{
    if (!needsTypeCheck(edge, SpecCellCheck | SpecOther))
        return;

    JSValueOperand operand(this, edge, ManualOperandSpeculation);
    GPRTemporary temp(this);
    GPRReg tempGPR = temp.gpr();

    MacroAssembler::Jump ok = m_jit.branchIfCell(operand.jsValueRegs());
    DFG_TYPE_CHECK(
        operand.jsValueRegs(), edge, SpecCellCheck | SpecOther,
        m_jit.branchIfNotOther(operand.jsValueRegs(), tempGPR));
    ok.link(&m_jit);
}
11313
// Emits an OSR-exit check that the cell in `cell` is an object, filtering the
// edge's proven type down to SpecObject.
void SpeculativeJIT::speculateObject(Edge edge, GPRReg cell)
{
    DFG_TYPE_CHECK(JSValueSource::unboxedCell(cell), edge, SpecObject, m_jit.branchIfNotObject(cell));
}
11318
11319void SpeculativeJIT::speculateObject(Edge edge)
11320{
11321 if (!needsTypeCheck(edge, SpecObject))
11322 return;
11323
11324 SpeculateCellOperand operand(this, edge);
11325 speculateObject(edge, operand.gpr());
11326}
11327
// Checks the cell's JSType is JSFunctionType, filtering to SpecFunction.
void SpeculativeJIT::speculateFunction(Edge edge, GPRReg cell)
{
    speculateCellType(edge, cell, SpecFunction, JSFunctionType);
}
11332
11333void SpeculativeJIT::speculateFunction(Edge edge)
11334{
11335 if (!needsTypeCheck(edge, SpecFunction))
11336 return;
11337
11338 SpeculateCellOperand operand(this, edge);
11339 speculateFunction(edge, operand.gpr());
11340}
11341
// Checks the cell's JSType is FinalObjectType, filtering to SpecFinalObject.
void SpeculativeJIT::speculateFinalObject(Edge edge, GPRReg cell)
{
    speculateCellType(edge, cell, SpecFinalObject, FinalObjectType);
}
11346
11347void SpeculativeJIT::speculateFinalObject(Edge edge)
11348{
11349 if (!needsTypeCheck(edge, SpecFinalObject))
11350 return;
11351
11352 SpeculateCellOperand operand(this, edge);
11353 speculateFinalObject(edge, operand.gpr());
11354}
11355
// Checks the cell's JSType is RegExpObjectType, filtering to SpecRegExpObject.
void SpeculativeJIT::speculateRegExpObject(Edge edge, GPRReg cell)
{
    speculateCellType(edge, cell, SpecRegExpObject, RegExpObjectType);
}
11360
11361void SpeculativeJIT::speculateRegExpObject(Edge edge)
11362{
11363 if (!needsTypeCheck(edge, SpecRegExpObject))
11364 return;
11365
11366 SpeculateCellOperand operand(this, edge);
11367 speculateRegExpObject(edge, operand.gpr());
11368}
11369
// Checks the cell's JSType is ArrayType, filtering to SpecArray.
void SpeculativeJIT::speculateArray(Edge edge, GPRReg cell)
{
    speculateCellType(edge, cell, SpecArray, ArrayType);
}
11374
11375void SpeculativeJIT::speculateArray(Edge edge)
11376{
11377 if (!needsTypeCheck(edge, SpecArray))
11378 return;
11379
11380 SpeculateCellOperand operand(this, edge);
11381 speculateArray(edge, operand.gpr());
11382}
11383
// Checks the cell's JSType is ProxyObjectType, filtering to SpecProxyObject.
void SpeculativeJIT::speculateProxyObject(Edge edge, GPRReg cell)
{
    speculateCellType(edge, cell, SpecProxyObject, ProxyObjectType);
}
11388
11389void SpeculativeJIT::speculateProxyObject(Edge edge)
11390{
11391 if (!needsTypeCheck(edge, SpecProxyObject))
11392 return;
11393
11394 SpeculateCellOperand operand(this, edge);
11395 speculateProxyObject(edge, operand.gpr());
11396}
11397
// Checks the cell's JSType is DerivedArrayType, filtering to SpecDerivedArray.
void SpeculativeJIT::speculateDerivedArray(Edge edge, GPRReg cell)
{
    speculateCellType(edge, cell, SpecDerivedArray, DerivedArrayType);
}
11402
11403void SpeculativeJIT::speculateDerivedArray(Edge edge)
11404{
11405 if (!needsTypeCheck(edge, SpecDerivedArray))
11406 return;
11407
11408 SpeculateCellOperand operand(this, edge);
11409 speculateDerivedArray(edge, operand.gpr());
11410}
11411
// Checks the cell's JSType is JSPromiseType, filtering to SpecPromiseObject.
void SpeculativeJIT::speculatePromiseObject(Edge edge, GPRReg cell)
{
    speculateCellType(edge, cell, SpecPromiseObject, JSPromiseType);
}
11416
11417void SpeculativeJIT::speculatePromiseObject(Edge edge)
11418{
11419 if (!needsTypeCheck(edge, SpecPromiseObject))
11420 return;
11421
11422 SpeculateCellOperand operand(this, edge);
11423 speculatePromiseObject(edge, operand.gpr());
11424}
11425
// Checks the cell's JSType is JSDateType, filtering to SpecDateObject.
void SpeculativeJIT::speculateDateObject(Edge edge, GPRReg cell)
{
    speculateCellType(edge, cell, SpecDateObject, JSDateType);
}
11430
11431void SpeculativeJIT::speculateDateObject(Edge edge)
11432{
11433 if (!needsTypeCheck(edge, SpecDateObject))
11434 return;
11435
11436 SpeculateCellOperand operand(this, edge);
11437 speculateDateObject(edge, operand.gpr());
11438}
11439
// Checks the cell's JSType is JSMapType, filtering to SpecMapObject.
void SpeculativeJIT::speculateMapObject(Edge edge, GPRReg cell)
{
    speculateCellType(edge, cell, SpecMapObject, JSMapType);
}
11444
11445void SpeculativeJIT::speculateMapObject(Edge edge)
11446{
11447 if (!needsTypeCheck(edge, SpecMapObject))
11448 return;
11449
11450 SpeculateCellOperand operand(this, edge);
11451 speculateMapObject(edge, operand.gpr());
11452}
11453
// Checks the cell's JSType is JSSetType, filtering to SpecSetObject.
void SpeculativeJIT::speculateSetObject(Edge edge, GPRReg cell)
{
    speculateCellType(edge, cell, SpecSetObject, JSSetType);
}
11458
11459void SpeculativeJIT::speculateSetObject(Edge edge)
11460{
11461 if (!needsTypeCheck(edge, SpecSetObject))
11462 return;
11463
11464 SpeculateCellOperand operand(this, edge);
11465 speculateSetObject(edge, operand.gpr());
11466}
11467
// Checks the cell's JSType is JSWeakMapType, filtering to SpecWeakMapObject.
void SpeculativeJIT::speculateWeakMapObject(Edge edge, GPRReg cell)
{
    speculateCellType(edge, cell, SpecWeakMapObject, JSWeakMapType);
}
11472
11473void SpeculativeJIT::speculateWeakMapObject(Edge edge)
11474{
11475 if (!needsTypeCheck(edge, SpecWeakMapObject))
11476 return;
11477
11478 SpeculateCellOperand operand(this, edge);
11479 speculateWeakMapObject(edge, operand.gpr());
11480}
11481
// Checks the cell's JSType is JSWeakSetType, filtering to SpecWeakSetObject.
void SpeculativeJIT::speculateWeakSetObject(Edge edge, GPRReg cell)
{
    speculateCellType(edge, cell, SpecWeakSetObject, JSWeakSetType);
}
11486
11487void SpeculativeJIT::speculateWeakSetObject(Edge edge)
11488{
11489 if (!needsTypeCheck(edge, SpecWeakSetObject))
11490 return;
11491
11492 SpeculateCellOperand operand(this, edge);
11493 speculateWeakSetObject(edge, operand.gpr());
11494}
11495
// Checks the cell's JSType is DataViewType, filtering to SpecDataViewObject.
void SpeculativeJIT::speculateDataViewObject(Edge edge, GPRReg cell)
{
    speculateCellType(edge, cell, SpecDataViewObject, DataViewType);
}
11500
11501void SpeculativeJIT::speculateDataViewObject(Edge edge)
11502{
11503 if (!needsTypeCheck(edge, SpecDataViewObject))
11504 return;
11505
11506 SpeculateCellOperand operand(this, edge);
11507 speculateDataViewObject(edge, operand.gpr());
11508}
11509
// Speculates that the edge is either an object or "other" (undefined/null).
// Cells must be objects; non-cells must pass the "other" check.
void SpeculativeJIT::speculateObjectOrOther(Edge edge)
{
    if (!needsTypeCheck(edge, SpecObject | SpecOther))
        return;

    JSValueOperand operand(this, edge, ManualOperandSpeculation);
    GPRTemporary temp(this);
    GPRReg tempGPR = temp.gpr();
    MacroAssembler::Jump notCell = m_jit.branchIfNotCell(operand.jsValueRegs());
    GPRReg gpr = operand.jsValueRegs().payloadGPR();
    // Cell branch: it must be an object.
    DFG_TYPE_CHECK(
        operand.jsValueRegs(), edge, (~SpecCellCheck) | SpecObject, m_jit.branchIfNotObject(gpr));
    MacroAssembler::Jump done = m_jit.jump();
    notCell.link(&m_jit);
    // Non-cell branch: it must be "other".
    DFG_TYPE_CHECK(
        operand.jsValueRegs(), edge, SpecCellCheck | SpecOther,
        m_jit.branchIfNotOther(operand.jsValueRegs(), tempGPR));
    done.link(&m_jit);
}
11529
// Emits an OSR-exit check that the cell in `cell` is a JSString, filtering
// the edge's proven type (non-cells are left alone in the filter mask since
// this check only observes the cell).
void SpeculativeJIT::speculateString(Edge edge, GPRReg cell)
{
    DFG_TYPE_CHECK(
        JSValueSource::unboxedCell(cell), edge, SpecString | ~SpecCellCheck, m_jit.branchIfNotString(cell));
}
11535
// Speculates that the value in regs is either a string or "other"
// (undefined/null). Cells must be strings; non-cells must pass the "other"
// check. scratch is used by the "other" test.
void SpeculativeJIT::speculateStringOrOther(Edge edge, JSValueRegs regs, GPRReg scratch)
{
    JITCompiler::Jump notCell = m_jit.branchIfNotCell(regs);
    GPRReg cell = regs.payloadGPR();
    DFG_TYPE_CHECK(regs, edge, (~SpecCellCheck) | SpecString, m_jit.branchIfNotString(cell));
    JITCompiler::Jump done = m_jit.jump();
    notCell.link(&m_jit);
    DFG_TYPE_CHECK(regs, edge, SpecCellCheck | SpecOther, m_jit.branchIfNotOther(regs, scratch));
    done.link(&m_jit);
}
11546
11547void SpeculativeJIT::speculateStringOrOther(Edge edge)
11548{
11549 if (!needsTypeCheck(edge, SpecString | SpecOther))
11550 return;
11551
11552 JSValueOperand operand(this, edge, ManualOperandSpeculation);
11553 GPRTemporary temp(this);
11554 JSValueRegs regs = operand.jsValueRegs();
11555 GPRReg tempGPR = temp.gpr();
11556 speculateStringOrOther(edge, regs, tempGPR);
11557}
11558
// Loads the string's StringImpl* into `storage` and, if needed, speculates
// that the string is an identifier: not a rope (impl non-null/untagged) and
// its StringImpl has the atom flag set.
void SpeculativeJIT::speculateStringIdentAndLoadStorage(Edge edge, GPRReg string, GPRReg storage)
{
    // The load happens unconditionally: callers rely on storage being filled
    // even when no check is required.
    m_jit.loadPtr(MacroAssembler::Address(string, JSString::offsetOfValue()), storage);

    if (!needsTypeCheck(edge, SpecStringIdent | ~SpecString))
        return;

    // Rope strings have no resolved StringImpl, so they cannot be idents.
    speculationCheck(
        BadType, JSValueSource::unboxedCell(string), edge,
        m_jit.branchIfRopeStringImpl(storage));
    // Only atomized strings count as identifiers.
    speculationCheck(
        BadType, JSValueSource::unboxedCell(string), edge, m_jit.branchTest32(
            MacroAssembler::Zero,
            MacroAssembler::Address(storage, StringImpl::flagsOffset()),
            MacroAssembler::TrustedImm32(StringImpl::flagIsAtom())));

    m_interpreter.filter(edge, SpecStringIdent | ~SpecString);
}
11577
11578void SpeculativeJIT::speculateStringIdent(Edge edge, GPRReg string)
11579{
11580 if (!needsTypeCheck(edge, SpecStringIdent))
11581 return;
11582
11583 GPRTemporary temp(this);
11584 speculateStringIdentAndLoadStorage(edge, string, temp.gpr());
11585}
11586
11587void SpeculativeJIT::speculateStringIdent(Edge edge)
11588{
11589 if (!needsTypeCheck(edge, SpecStringIdent))
11590 return;
11591
11592 SpeculateCellOperand operand(this, edge);
11593 GPRReg gpr = operand.gpr();
11594 speculateString(edge, gpr);
11595 speculateStringIdent(edge, gpr);
11596}
11597
11598void SpeculativeJIT::speculateString(Edge edge)
11599{
11600 if (!needsTypeCheck(edge, SpecString))
11601 return;
11602
11603 SpeculateCellOperand operand(this, edge);
11604 speculateString(edge, operand.gpr());
11605}
11606
// Emits an OSR-exit check that the cell in cellGPR is a StringObject (exact
// JSType check), filtering the edge's proven type.
void SpeculativeJIT::speculateStringObject(Edge edge, GPRReg cellGPR)
{
    DFG_TYPE_CHECK(JSValueSource::unboxedCell(cellGPR), edge, ~SpecCellCheck | SpecStringObject, m_jit.branchIfNotType(cellGPR, StringObjectType));
}
11611
11612void SpeculativeJIT::speculateStringObject(Edge edge)
11613{
11614 if (!needsTypeCheck(edge, SpecStringObject))
11615 return;
11616
11617 SpeculateCellOperand operand(this, edge);
11618 GPRReg gpr = operand.gpr();
11619 speculateStringObject(edge, gpr);
11620}
11621
// Speculates that the edge is either a JSString or a StringObject, by
// comparing the cell's JSType against both.
void SpeculativeJIT::speculateStringOrStringObject(Edge edge)
{
    if (!needsTypeCheck(edge, SpecString | SpecStringObject))
        return;

    SpeculateCellOperand operand(this, edge);
    GPRReg gpr = operand.gpr();
    // Re-check after constructing the cell operand: the operand's own cell
    // speculation may have refined the proof enough to make this a no-op
    // (presumably why the guard is repeated — confirm before "simplifying").
    if (!needsTypeCheck(edge, SpecString | SpecStringObject))
        return;

    GPRTemporary typeTemp(this);
    GPRReg typeGPR = typeTemp.gpr();

    m_jit.load8(JITCompiler::Address(gpr, JSCell::typeInfoTypeOffset()), typeGPR);

    // Accept StringType outright; otherwise the type must be StringObjectType.
    JITCompiler::Jump isString = m_jit.branch32(JITCompiler::Equal, typeGPR, TrustedImm32(StringType));
    speculationCheck(BadType, JSValueSource::unboxedCell(gpr), edge.node(), m_jit.branch32(JITCompiler::NotEqual, typeGPR, TrustedImm32(StringObjectType)));
    isString.link(&m_jit);

    m_interpreter.filter(edge, SpecString | SpecStringObject);
}
11643
// Speculates that the edge is not a non-identifier ("var") string: non-cells
// and non-string cells pass through untouched; actual strings must prove
// they are identifier (atom) strings via the ident check.
void SpeculativeJIT::speculateNotStringVar(Edge edge)
{
    JSValueOperand operand(this, edge, ManualOperandSpeculation);
    GPRTemporary temp(this);
    GPRReg tempGPR = temp.gpr();

    JITCompiler::Jump notCell = m_jit.branchIfNotCell(operand.jsValueRegs());
    GPRReg cell = operand.jsValueRegs().payloadGPR();

    JITCompiler::Jump notString = m_jit.branchIfNotString(cell);

    speculateStringIdentAndLoadStorage(edge, cell, tempGPR);

    notString.link(&m_jit);
    notCell.link(&m_jit);
}
11660
// Speculates that the edge is not a Symbol. The symbol check only makes
// sense on cells, so non-cells (when still possible) bypass it.
void SpeculativeJIT::speculateNotSymbol(Edge edge)
{
    if (!needsTypeCheck(edge, ~SpecSymbol))
        return;

    JSValueOperand operand(this, edge, ManualOperandSpeculation);
    auto valueRegs = operand.jsValueRegs();
    GPRReg value = valueRegs.payloadGPR();
    JITCompiler::Jump notCell;

    // Only emit the cell pre-check if non-cell values are still possible.
    bool needsCellCheck = needsTypeCheck(edge, SpecCell);
    if (needsCellCheck)
        notCell = m_jit.branchIfNotCell(valueRegs);

    speculationCheck(BadType, JSValueSource::unboxedCell(value), edge.node(), m_jit.branchIfSymbol(value));

    if (needsCellCheck)
        notCell.link(&m_jit);

    m_interpreter.filter(edge, ~SpecSymbol);
}
11682
// Emits an OSR-exit check that the cell in `cell` is a Symbol, filtering the
// edge's proven type.
void SpeculativeJIT::speculateSymbol(Edge edge, GPRReg cell)
{
    DFG_TYPE_CHECK(JSValueSource::unboxedCell(cell), edge, ~SpecCellCheck | SpecSymbol, m_jit.branchIfNotSymbol(cell));
}
11687
11688void SpeculativeJIT::speculateSymbol(Edge edge)
11689{
11690 if (!needsTypeCheck(edge, SpecSymbol))
11691 return;
11692
11693 SpeculateCellOperand operand(this, edge);
11694 speculateSymbol(edge, operand.gpr());
11695}
11696
// Emits an OSR-exit check that the cell in `cell` is a heap-allocated BigInt,
// filtering the edge's proven type.
void SpeculativeJIT::speculateHeapBigInt(Edge edge, GPRReg cell)
{
    DFG_TYPE_CHECK(JSValueSource::unboxedCell(cell), edge, ~SpecCellCheck | SpecHeapBigInt, m_jit.branchIfNotHeapBigInt(cell));
}
11701
11702void SpeculativeJIT::speculateHeapBigInt(Edge edge)
11703{
11704 if (!needsTypeCheck(edge, SpecHeapBigInt))
11705 return;
11706
11707 SpeculateCellOperand operand(this, edge);
11708 speculateHeapBigInt(edge, operand.gpr());
11709}
11710
// Emits an OSR-exit check that the boxed value in regs is not a cell,
// filtering the edge's proven type.
void SpeculativeJIT::speculateNotCell(Edge edge, JSValueRegs regs)
{
    DFG_TYPE_CHECK(regs, edge, ~SpecCellCheck, m_jit.branchIfCell(regs));
}
11715
11716void SpeculativeJIT::speculateNotCell(Edge edge)
11717{
11718 if (!needsTypeCheck(edge, ~SpecCellCheck))
11719 return;
11720
11721 JSValueOperand operand(this, edge, ManualOperandSpeculation);
11722 speculateNotCell(edge, operand.jsValueRegs());
11723}
11724
// Speculates that the edge is neither a cell nor a BigInt. Without BIGINT32
// support, all BigInts are heap cells, so the plain not-cell check suffices.
void SpeculativeJIT::speculateNotCellNorBigInt(Edge edge)
{
#if USE(BIGINT32)
    if (!needsTypeCheck(edge, ~SpecCellCheck & ~SpecBigInt))
        return;

    JSValueOperand operand(this, edge, ManualOperandSpeculation);
    GPRTemporary temp(this);

    JSValueRegs regs = operand.jsValueRegs();
    GPRReg tempGPR = temp.gpr();

    // First exclude cells (which covers heap BigInts), then exclude inline
    // BigInt32 values.
    DFG_TYPE_CHECK(regs, edge, ~SpecCellCheck, m_jit.branchIfCell(regs));
    DFG_TYPE_CHECK(regs, edge, ~SpecCellCheck & ~SpecBigInt, m_jit.branchIfBigInt32(regs, tempGPR));
#else
    speculateNotCell(edge);
#endif
}
11743
// Speculates that the boxed value in regs is not a double. Int32s are numbers
// but not doubles, so they skip the number check when still possible.
void SpeculativeJIT::speculateNotDouble(Edge edge, JSValueRegs regs, GPRReg tempGPR)
{
    if (!needsTypeCheck(edge, ~SpecFullDouble))
        return;

    JITCompiler::Jump done;

    bool mayBeInt32 = needsTypeCheck(edge, ~SpecInt32Only);
    if (mayBeInt32)
        done = m_jit.branchIfInt32(regs);

    // Any remaining number must be a double, so "is a number" fails the check.
    DFG_TYPE_CHECK(regs, edge, ~SpecFullDouble, m_jit.branchIfNumber(regs, tempGPR));

    if (mayBeInt32)
        done.link(&m_jit);
}
11760
11761void SpeculativeJIT::speculateNotDouble(Edge edge)
11762{
11763 if (!needsTypeCheck(edge, ~SpecFullDouble))
11764 return;
11765
11766 JSValueOperand operand(this, edge, ManualOperandSpeculation);
11767 GPRTemporary temp(this);
11768 JSValueRegs regs = operand.jsValueRegs();
11769 GPRReg tempGPR = temp.gpr();
11770
11771 speculateNotDouble(edge, regs, tempGPR);
11772}
11773
// Speculates that the boxed value in regs is not a double, not a JSString,
// and not a heap BigInt. Int32s and non-cells short-circuit past the checks
// that cannot apply to them.
void SpeculativeJIT::speculateNeitherDoubleNorHeapBigIntNorString(Edge edge, JSValueRegs regs, GPRReg tempGPR)
{
    if (!needsTypeCheck(edge, ~(SpecFullDouble | SpecString | SpecHeapBigInt)))
        return;

    MacroAssembler::JumpList done;

    // Int32s are numbers but not doubles; let them pass the number check.
    bool mayBeInt32 = needsTypeCheck(edge, ~SpecInt32Only);
    if (mayBeInt32)
        done.append(m_jit.branchIfInt32(regs));

    // Any remaining number must be a double.
    DFG_TYPE_CHECK(regs, edge, ~SpecFullDouble, m_jit.branchIfNumber(regs, tempGPR));

    // The string/heap-BigInt checks only apply to cells.
    bool mayNotBeCell = needsTypeCheck(edge, SpecCell);
    if (mayNotBeCell)
        done.append(m_jit.branchIfNotCell(regs));

    DFG_TYPE_CHECK(regs, edge, ~SpecString, m_jit.branchIfString(regs.payloadGPR()));
    DFG_TYPE_CHECK(regs, edge, ~SpecHeapBigInt, m_jit.branchIfHeapBigInt(regs.payloadGPR()));

    if (mayBeInt32 || mayNotBeCell)
        done.link(&m_jit);
}
11797
11798void SpeculativeJIT::speculateNeitherDoubleNorHeapBigIntNorString(Edge edge)
11799{
11800 if (!needsTypeCheck(edge, ~(SpecFullDouble | SpecHeapBigInt | SpecString)))
11801 return;
11802
11803 JSValueOperand operand(this, edge, ManualOperandSpeculation);
11804 GPRTemporary temp(this);
11805 JSValueRegs regs = operand.jsValueRegs();
11806 GPRReg tempGPR = temp.gpr();
11807
11808 speculateNeitherDoubleNorHeapBigIntNorString(edge, regs, tempGPR);
11809}
11810
// Emits an OSR-exit check that the boxed value in regs is "other"
// (undefined/null), filtering the edge's proven type to SpecOther.
void SpeculativeJIT::speculateOther(Edge edge, JSValueRegs regs, GPRReg tempGPR)
{
    DFG_TYPE_CHECK(regs, edge, SpecOther, m_jit.branchIfNotOther(regs, tempGPR));
}
11815
11816void SpeculativeJIT::speculateOther(Edge edge, JSValueRegs regs)
11817{
11818 if (!needsTypeCheck(edge, SpecOther))
11819 return;
11820
11821 GPRTemporary temp(this);
11822 GPRReg tempGPR = temp.gpr();
11823 speculateOther(edge, regs, tempGPR);
11824}
11825
11826void SpeculativeJIT::speculateOther(Edge edge)
11827{
11828 if (!needsTypeCheck(edge, SpecOther))
11829 return;
11830
11831 JSValueOperand operand(this, edge, ManualOperandSpeculation);
11832 speculateOther(edge, operand.jsValueRegs());
11833}
11834
void SpeculativeJIT::speculateMisc(Edge edge, JSValueRegs regs)
{
    // Speculates SpecMisc, OSR-exiting otherwise.
#if USE(JSVALUE64)
    // On 64-bit, misc values encode as immediates no greater than JSValue::MiscTag,
    // so a single unsigned comparison covers the whole set.
    DFG_TYPE_CHECK(
        regs, edge, SpecMisc,
        m_jit.branch64(MacroAssembler::Above, regs.gpr(), MacroAssembler::TrustedImm64(JSValue::MiscTag)));
#else
    // On 32-bit, Int32Tag sits inside the >= UndefinedTag tag range (asserted below),
    // so Int32 must be excluded first before accepting tags >= UndefinedTag.
    IGNORE_WARNINGS_BEGIN("enum-compare")
    static_assert(JSValue::Int32Tag >= JSValue::UndefinedTag, "Int32Tag is included in >= JSValue::UndefinedTag range.");
    IGNORE_WARNINGS_END
    DFG_TYPE_CHECK(
        regs, edge, ~SpecInt32Only,
        m_jit.branchIfInt32(regs.tagGPR()));
    DFG_TYPE_CHECK(
        regs, edge, SpecMisc,
        m_jit.branch32(MacroAssembler::Below, regs.tagGPR(), MacroAssembler::TrustedImm32(JSValue::UndefinedTag)));
#endif
}
11853
11854void SpeculativeJIT::speculateMisc(Edge edge)
11855{
11856 if (!needsTypeCheck(edge, SpecMisc))
11857 return;
11858
11859 JSValueOperand operand(this, edge, ManualOperandSpeculation);
11860 speculateMisc(edge, operand.jsValueRegs());
11861}
11862
void SpeculativeJIT::speculate(Node*, Edge edge)
{
    // Emits the type check implied by the edge's use kind, OSR-exiting on failure.
    // UntypedUse emits nothing; the "Known"/Rep use kinds assert that the abstract
    // interpreter already proved the type, so no code is needed for them either.
    switch (edge.useKind()) {
    case UntypedUse:
        break;
    case DoubleRepUse:
    case Int52RepUse:
    case KnownInt32Use:
    case KnownCellUse:
    case KnownStringUse:
    case KnownPrimitiveUse:
    case KnownOtherUse:
    case KnownBooleanUse:
        ASSERT(!m_interpreter.needsTypeCheck(edge));
        break;
    case Int32Use:
        speculateInt32(edge);
        break;
    case NumberUse:
        speculateNumber(edge);
        break;
    case RealNumberUse:
        speculateRealNumber(edge);
        break;
    case DoubleRepRealUse:
        speculateDoubleRepReal(edge);
        break;
#if USE(JSVALUE64)
    // AnyInt speculation is only available on 64-bit (Int52 representation).
    case AnyIntUse:
        speculateAnyInt(edge);
        break;
    case DoubleRepAnyIntUse:
        speculateDoubleRepAnyInt(edge);
        break;
#endif
    case BooleanUse:
        speculateBoolean(edge);
        break;
    case CellUse:
        speculateCell(edge);
        break;
    case CellOrOtherUse:
        speculateCellOrOther(edge);
        break;
    case ObjectUse:
        speculateObject(edge);
        break;
    case FunctionUse:
        speculateFunction(edge);
        break;
    case ArrayUse:
        speculateArray(edge);
        break;
    case FinalObjectUse:
        speculateFinalObject(edge);
        break;
    case RegExpObjectUse:
        speculateRegExpObject(edge);
        break;
    case PromiseObjectUse:
        speculatePromiseObject(edge);
        break;
    case ProxyObjectUse:
        speculateProxyObject(edge);
        break;
    case DerivedArrayUse:
        speculateDerivedArray(edge);
        break;
    case DateObjectUse:
        speculateDateObject(edge);
        break;
    case MapObjectUse:
        speculateMapObject(edge);
        break;
    case SetObjectUse:
        speculateSetObject(edge);
        break;
    case WeakMapObjectUse:
        speculateWeakMapObject(edge);
        break;
    case WeakSetObjectUse:
        speculateWeakSetObject(edge);
        break;
    case DataViewObjectUse:
        speculateDataViewObject(edge);
        break;
    case ObjectOrOtherUse:
        speculateObjectOrOther(edge);
        break;
    case StringIdentUse:
        speculateStringIdent(edge);
        break;
    case StringUse:
        speculateString(edge);
        break;
    case StringOrOtherUse:
        speculateStringOrOther(edge);
        break;
    case SymbolUse:
        speculateSymbol(edge);
        break;
#if USE(BIGINT32)
    // BigInt32 speculation only exists when small big-ints are encoded inline.
    case BigInt32Use:
        speculateBigInt32(edge);
        break;
    case AnyBigIntUse:
        speculateAnyBigInt(edge);
        break;
#endif
    case HeapBigIntUse:
        speculateHeapBigInt(edge);
        break;
    case StringObjectUse:
        speculateStringObject(edge);
        break;
    case StringOrStringObjectUse:
        speculateStringOrStringObject(edge);
        break;
    case NotStringVarUse:
        speculateNotStringVar(edge);
        break;
    case NotSymbolUse:
        speculateNotSymbol(edge);
        break;
    case NotCellUse:
        speculateNotCell(edge);
        break;
    case NotCellNorBigIntUse:
        speculateNotCellNorBigInt(edge);
        break;
    case NotDoubleUse:
        speculateNotDouble(edge);
        break;
    case NeitherDoubleNorHeapBigIntNorStringUse:
        speculateNeitherDoubleNorHeapBigIntNorString(edge);
        break;
    case OtherUse:
        speculateOther(edge);
        break;
    case MiscUse:
        speculateMisc(edge);
        break;
    default:
        RELEASE_ASSERT_NOT_REACHED();
        break;
    }
}
12010
void SpeculativeJIT::emitSwitchIntJump(
    SwitchData* data, GPRReg value, GPRReg scratch)
{
    // Emits an indirect jump through the switch's CTI jump table. `value` is
    // clobbered (rebased by the table minimum); out-of-range values branch to the
    // switch's fall-through block.
    const UnlinkedSimpleJumpTable& unlinkedTable = m_jit.graph().unlinkedSwitchJumpTable(data->switchTableIndex);
    SimpleJumpTable& linkedTable = m_jit.graph().switchJumpTable(data->switchTableIndex);
    linkedTable.ensureCTITable(unlinkedTable);
    // Rebase so the table index starts at zero, then bounds-check against the table size.
    m_jit.sub32(Imm32(unlinkedTable.m_min), value);
    addBranch(
        m_jit.branch32(JITCompiler::AboveOrEqual, value, Imm32(linkedTable.m_ctiOffsets.size())),
        data->fallThrough.block);
    m_jit.move(TrustedImmPtr(linkedTable.m_ctiOffsets.data()), scratch);

#if USE(JSVALUE64)
    // 64-bit can jump through a memory operand directly.
    m_jit.farJump(JITCompiler::BaseIndex(scratch, value, JITCompiler::ScalePtr), JSSwitchPtrTag);
#else
    // 32-bit loads the target first, then jumps through the register.
    m_jit.loadPtr(JITCompiler::BaseIndex(scratch, value, JITCompiler::ScalePtr), scratch);
    m_jit.farJump(scratch, JSSwitchPtrTag);
#endif
    // Record that this switch actually emitted table-based dispatch.
    data->didUseJumpTable = true;
}
12031
void SpeculativeJIT::emitSwitchImm(Node* node, SwitchData* data)
{
    // Compiles a SwitchImm: integer-valued switch dispatch through a jump table.
    switch (node->child1().useKind()) {
    case Int32Use: {
        // Already proven Int32: jump straight through the table.
        SpeculateInt32Operand value(this, node->child1());
        GPRTemporary temp(this);
        emitSwitchIntJump(data, value.gpr(), temp.gpr());
        noResult(node);
        break;
    }

    case UntypedUse: {
        JSValueOperand value(this, node->child1());
        GPRTemporary temp(this);
        JSValueRegs valueRegs = value.jsValueRegs();
        GPRReg scratch = temp.gpr();

        value.use();

        // Int32 values use the jump table directly.
        auto notInt32 = m_jit.branchIfNotInt32(valueRegs);
        emitSwitchIntJump(data, valueRegs.payloadGPR(), scratch);
        notInt32.link(&m_jit);
        // Non-numbers go to the default block.
        addBranch(m_jit.branchIfNotNumber(valueRegs, scratch), data->fallThrough.block);

        // Doubles: ask the runtime for the matching table entry, then jump to it.
        const UnlinkedSimpleJumpTable& unlinkedTable = m_jit.graph().unlinkedSwitchJumpTable(data->switchTableIndex);
        silentSpillAllRegisters(scratch);
        callOperation(operationFindSwitchImmTargetForDouble, scratch, &vm(), valueRegs, data->switchTableIndex, unlinkedTable.m_min);
        silentFillAllRegisters();

        m_jit.farJump(scratch, JSSwitchPtrTag);
        noResult(node, UseChildrenCalledExplicitly);
        break;
    }

    default:
        RELEASE_ASSERT_NOT_REACHED();
        break;
    }
    ASSERT(data->didUseJumpTable);
}
12072
void SpeculativeJIT::emitSwitchCharStringJump(Node* node, SwitchData* data, GPRReg value, GPRReg scratch)
{
    // Dispatches a SwitchChar on a JSString in `value`: loads its single character
    // code and jumps through the integer jump table. Both registers are clobbered.
    // Resolve the StringImpl, taking a slow path to flatten rope strings first.
    m_jit.loadPtr(MacroAssembler::Address(value, JSString::offsetOfValue()), scratch);
    auto isRope = m_jit.branchIfRopeStringImpl(scratch);
    addSlowPathGenerator(slowPathCall(isRope, this, operationResolveRope, scratch, TrustedImmPtr::weakPointer(m_graph, m_graph.globalObjectFor(node->origin.semantic)), value));

    // Only single-character strings can match a char case; others take the default.
    addBranch(
        m_jit.branch32(
            MacroAssembler::NotEqual,
            MacroAssembler::Address(scratch, StringImpl::lengthMemoryOffset()),
            TrustedImm32(1)),
        data->fallThrough.block);

    m_jit.loadPtr(MacroAssembler::Address(scratch, StringImpl::dataOffset()), value);

    // Load the lone character as 8-bit or 16-bit depending on the impl's flags.
    JITCompiler::Jump is8Bit = m_jit.branchTest32(
        MacroAssembler::NonZero,
        MacroAssembler::Address(scratch, StringImpl::flagsOffset()),
        TrustedImm32(StringImpl::flagIs8Bit()));

    m_jit.load16(MacroAssembler::Address(value), scratch);

    JITCompiler::Jump ready = m_jit.jump();

    is8Bit.link(&m_jit);
    m_jit.load8(MacroAssembler::Address(value), scratch);

    ready.link(&m_jit);
    // Dispatch on the character code through the shared integer jump table.
    emitSwitchIntJump(data, scratch, value);
}
12103
void SpeculativeJIT::emitSwitchChar(Node* node, SwitchData* data)
{
    // Compiles a SwitchChar: dispatch on the character of a one-character string.
    switch (node->child1().useKind()) {
    case StringUse: {
        // Proven cell: speculate String, then dispatch on its character.
        SpeculateCellOperand op1(this, node->child1());
        GPRTemporary temp(this);

        GPRReg op1GPR = op1.gpr();
        GPRReg tempGPR = temp.gpr();

        op1.use();

        speculateString(node->child1(), op1GPR);
        emitSwitchCharStringJump(node, data, op1GPR, tempGPR);
        noResult(node, UseChildrenCalledExplicitly);
        break;
    }

    case UntypedUse: {
        JSValueOperand op1(this, node->child1());
        GPRTemporary temp(this);

        JSValueRegs op1Regs = op1.jsValueRegs();
        GPRReg tempGPR = temp.gpr();

        op1.use();

        // Anything that is not a string goes to the default block, no OSR exit.
        addBranch(m_jit.branchIfNotCell(op1Regs), data->fallThrough.block);

        addBranch(m_jit.branchIfNotString(op1Regs.payloadGPR()), data->fallThrough.block);

        emitSwitchCharStringJump(node, data, op1Regs.payloadGPR(), tempGPR);
        noResult(node, UseChildrenCalledExplicitly);
        break;
    }

    default:
        RELEASE_ASSERT_NOT_REACHED();
        break;
    }
    ASSERT(data->didUseJumpTable);
}
12146
namespace {

// One contiguous run of string-switch cases (cases[begin, end)) that share the
// same character at the string position currently being switched on. Ordered by
// that character so runs can feed a BinarySwitch.
struct CharacterCase {
    bool operator<(const CharacterCase& other) const
    {
        return character < other.character;
    }

    LChar character; // The shared character value at the current position.
    unsigned begin; // Index of the first case in the run (inclusive).
    unsigned end; // Index past the last case in the run (exclusive).
};

} // anonymous namespace
12161
// Recursively emits a character-by-character binary search over the sorted,
// 8-bit string cases in cases[begin, end). Invariants on entry: the first
// `numChecked` characters of the input have been verified to match all cases in
// the range; `buffer` points at the input's 8-bit character data; `length` holds
// its length; `temp` is scratch; the input's length is known to be at least
// `alreadyCheckedLength` (exactly that, if `checkedExactLength`). Non-matching
// inputs branch to the switch's fall-through block.
void SpeculativeJIT::emitBinarySwitchStringRecurse(
    SwitchData* data, const Vector<SpeculativeJIT::StringSwitchCase>& cases,
    unsigned numChecked, unsigned begin, unsigned end, GPRReg buffer, GPRReg length,
    GPRReg temp, unsigned alreadyCheckedLength, bool checkedExactLength)
{
    static constexpr bool verbose = false;

    if (verbose) {
        dataLog("We're down to the following cases, alreadyCheckedLength = ", alreadyCheckedLength, ":\n");
        for (unsigned i = begin; i < end; ++i) {
            dataLog("    ", cases[i].string, "\n");
        }
    }

    // Empty range: nothing can match.
    if (begin == end) {
        jump(data->fallThrough.block, ForceJump);
        return;
    }

    // Compute the length of the prefix (beyond numChecked) shared by every case
    // in the range, the minimum case length, and whether all lengths are equal.
    unsigned minLength = cases[begin].string->length();
    unsigned commonChars = minLength;
    bool allLengthsEqual = true;
    for (unsigned i = begin + 1; i < end; ++i) {
        unsigned myCommonChars = numChecked;
        for (unsigned j = numChecked;
            j < std::min(cases[begin].string->length(), cases[i].string->length());
            ++j) {
            if (cases[begin].string->at(j) != cases[i].string->at(j)) {
                if (verbose)
                    dataLog("string(", cases[i].string, ")[", j, "] != string(", cases[begin].string, ")[", j, "]\n");
                break;
            }
            myCommonChars++;
        }
        commonChars = std::min(commonChars, myCommonChars);
        if (minLength != cases[i].string->length())
            allLengthsEqual = false;
        minLength = std::min(minLength, cases[i].string->length());
    }

    if (checkedExactLength) {
        RELEASE_ASSERT(alreadyCheckedLength == minLength);
        RELEASE_ASSERT(allLengthsEqual);
    }

    RELEASE_ASSERT(minLength >= commonChars);

    if (verbose)
        dataLog("length = ", minLength, ", commonChars = ", commonChars, ", allLengthsEqual = ", allLengthsEqual, "\n");

    // Reject inputs too short to match any case (or, when all case lengths are
    // equal, inputs whose length differs), unless already checked.
    if (!allLengthsEqual && alreadyCheckedLength < minLength)
        branch32(MacroAssembler::Below, length, Imm32(minLength), data->fallThrough.block);
    if (allLengthsEqual && (alreadyCheckedLength < minLength || !checkedExactLength))
        branch32(MacroAssembler::NotEqual, length, Imm32(minLength), data->fallThrough.block);

    // Verify the shared prefix characters that have not been checked yet.
    for (unsigned i = numChecked; i < commonChars; ++i) {
        branch8(
            MacroAssembler::NotEqual, MacroAssembler::Address(buffer, i),
            TrustedImm32(cases[begin].string->at(i)), data->fallThrough.block);
    }

    if (minLength == commonChars) {
        // This is the case where one of the cases is a prefix of all of the other cases.
        // We've already checked that the input string is a prefix of all of the cases,
        // so we just check length to jump to that case.

        if (ASSERT_ENABLED) {
            ASSERT(cases[begin].string->length() == commonChars);
            for (unsigned i = begin + 1; i < end; ++i)
                ASSERT(cases[i].string->length() > commonChars);
        }

        if (allLengthsEqual) {
            RELEASE_ASSERT(end == begin + 1);
            jump(cases[begin].target, ForceJump);
            return;
        }

        branch32(MacroAssembler::Equal, length, Imm32(commonChars), cases[begin].target);

        // We've checked if the length is >= minLength, and then we checked if the
        // length is == commonChars. We get to this point if it is >= minLength but not
        // == commonChars. Hence we know that it now must be > minLength, i.e., that
        // it's >= minLength + 1.
        emitBinarySwitchStringRecurse(
            data, cases, commonChars, begin + 1, end, buffer, length, temp, minLength + 1, false);
        return;
    }

    // At this point we know that the string is longer than commonChars, and we've only
    // verified commonChars. Use a binary switch on the next unchecked character, i.e.
    // string[commonChars].

    RELEASE_ASSERT(end >= begin + 2);

    m_jit.load8(MacroAssembler::Address(buffer, commonChars), temp);

    // Group the (sorted) cases into runs sharing the character at `commonChars`.
    Vector<CharacterCase> characterCases;
    CharacterCase currentCase;
    currentCase.character = cases[begin].string->at(commonChars);
    currentCase.begin = begin;
    currentCase.end = begin + 1;
    for (unsigned i = begin + 1; i < end; ++i) {
        if (cases[i].string->at(commonChars) != currentCase.character) {
            if (verbose)
                dataLog("string(", cases[i].string, ")[", commonChars, "] != string(", cases[begin].string, ")[", commonChars, "]\n");
            currentCase.end = i;
            characterCases.append(currentCase);
            currentCase.character = cases[i].string->at(commonChars);
            currentCase.begin = i;
            currentCase.end = i + 1;
        } else
            currentCase.end = i + 1;
    }
    characterCases.append(currentCase);

    Vector<int64_t> characterCaseValues;
    for (unsigned i = 0; i < characterCases.size(); ++i)
        characterCaseValues.append(characterCases[i].character);

    // Binary-search on that character, then recurse into each run.
    BinarySwitch binarySwitch(temp, characterCaseValues, BinarySwitch::Int32);
    while (binarySwitch.advance(m_jit)) {
        const CharacterCase& myCase = characterCases[binarySwitch.caseIndex()];
        emitBinarySwitchStringRecurse(
            data, cases, commonChars + 1, myCase.begin, myCase.end, buffer, length,
            temp, minLength, allLengthsEqual);
    }

    addBranch(binarySwitch.fallThrough(), data->fallThrough.block);
}
12292
void SpeculativeJIT::emitSwitchStringOnString(Node* node, SwitchData* data, GPRReg string)
{
    // Dispatches a SwitchString on the JSString in `string` (clobbered). Uses an
    // inline binary search when every case is a short 8-bit string; otherwise (and
    // for ropes/16-bit inputs) falls back to operationSwitchString.
    data->didUseJumpTable = true;

    const UnlinkedStringJumpTable& unlinkedTable = m_graph.unlinkedStringSwitchJumpTable(data->switchTableIndex);
    StringJumpTable& linkedTable = m_jit.graph().stringSwitchJumpTable(data->switchTableIndex);
    linkedTable.ensureCTITable(unlinkedTable);

    // Decide whether the case set is eligible for the inline binary switch:
    // all cases 8-bit and within the per-case and total length limits.
    bool canDoBinarySwitch = true;
    unsigned totalLength = 0;

    for (unsigned i = data->cases.size(); i--;) {
        StringImpl* string = data->cases[i].value.stringImpl();
        if (!string->is8Bit()) {
            canDoBinarySwitch = false;
            break;
        }
        if (string->length() > Options::maximumBinaryStringSwitchCaseLength()) {
            canDoBinarySwitch = false;
            break;
        }
        totalLength += string->length();
    }

    if (!canDoBinarySwitch || totalLength > Options::maximumBinaryStringSwitchTotalLength()) {
        // Fully slow path: the C++ helper returns the jump target.
        flushRegisters();
        callOperation(operationSwitchString, string, TrustedImmPtr::weakPointer(m_graph, m_graph.globalObjectFor(node->origin.semantic)), static_cast<size_t>(data->switchTableIndex), &unlinkedTable, string);
        m_jit.exceptionCheck();
        m_jit.farJump(string, JSSwitchPtrTag);
        return;
    }

    GPRTemporary length(this);
    GPRTemporary temp(this);

    GPRReg lengthGPR = length.gpr();
    GPRReg tempGPR = temp.gpr();

    // Ropes and non-8-bit inputs take the slow path below.
    MacroAssembler::JumpList slowCases;
    m_jit.loadPtr(MacroAssembler::Address(string, JSString::offsetOfValue()), tempGPR);
    slowCases.append(m_jit.branchIfRopeStringImpl(tempGPR));
    m_jit.load32(MacroAssembler::Address(tempGPR, StringImpl::lengthMemoryOffset()), lengthGPR);

    slowCases.append(m_jit.branchTest32(
        MacroAssembler::Zero,
        MacroAssembler::Address(tempGPR, StringImpl::flagsOffset()),
        TrustedImm32(StringImpl::flagIs8Bit())));

    // Repurpose `string` as the character-data pointer for the binary search.
    m_jit.loadPtr(MacroAssembler::Address(tempGPR, StringImpl::dataOffset()), string);

    Vector<StringSwitchCase> cases;
    for (unsigned i = 0; i < data->cases.size(); ++i) {
        cases.append(
            StringSwitchCase(data->cases[i].value.stringImpl(), data->cases[i].target.block));
    }

    // The recursive emitter requires the cases sorted lexicographically.
    std::sort(cases.begin(), cases.end());

    emitBinarySwitchStringRecurse(
        data, cases, 0, 0, cases.size(), string, lengthGPR, tempGPR, 0, false);

    slowCases.link(&m_jit);
    silentSpillAllRegisters(string);
    callOperation(operationSwitchString, string, TrustedImmPtr::weakPointer(m_graph, m_graph.globalObjectFor(node->origin.semantic)), static_cast<size_t>(data->switchTableIndex), &unlinkedTable, string);
    silentFillAllRegisters();
    m_jit.exceptionCheck();
    m_jit.farJump(string, JSSwitchPtrTag);
}
12361
void SpeculativeJIT::emitSwitchString(Node* node, SwitchData* data)
{
    // Compiles a SwitchString with one of three strategies depending on use kind:
    // pointer-identity binary switch (StringIdentUse), content-based dispatch
    // (StringUse), or content-based dispatch after cell/string filtering (UntypedUse).
    switch (node->child1().useKind()) {
    case StringIdentUse: {
        // Note that we do not use JumpTable in this case.
        SpeculateCellOperand op1(this, node->child1());
        GPRTemporary temp(this);

        GPRReg op1GPR = op1.gpr();
        GPRReg tempGPR = temp.gpr();

        speculateString(node->child1(), op1GPR);
        // Leaves the atomized StringImpl* in tempGPR, so cases can be compared by pointer.
        speculateStringIdentAndLoadStorage(node->child1(), op1GPR, tempGPR);

        Vector<int64_t> identifierCaseValues;
        for (unsigned i = 0; i < data->cases.size(); ++i) {
            identifierCaseValues.append(
                static_cast<int64_t>(bitwise_cast<intptr_t>(data->cases[i].value.stringImpl())));
        }

        BinarySwitch binarySwitch(tempGPR, identifierCaseValues, BinarySwitch::IntPtr);
        while (binarySwitch.advance(m_jit))
            jump(data->cases[binarySwitch.caseIndex()].target.block, ForceJump);
        addBranch(binarySwitch.fallThrough(), data->fallThrough.block);

        noResult(node);
        break;
    }

    case StringUse: {
        SpeculateCellOperand op1(this, node->child1());

        GPRReg op1GPR = op1.gpr();

        op1.use();

        speculateString(node->child1(), op1GPR);
        emitSwitchStringOnString(node, data, op1GPR);
        noResult(node, UseChildrenCalledExplicitly);
        break;
    }

    case UntypedUse: {
        JSValueOperand op1(this, node->child1());

        JSValueRegs op1Regs = op1.jsValueRegs();

        op1.use();

        // Non-strings take the default block rather than OSR-exiting.
        addBranch(m_jit.branchIfNotCell(op1Regs), data->fallThrough.block);

        addBranch(m_jit.branchIfNotString(op1Regs.payloadGPR()), data->fallThrough.block);

        emitSwitchStringOnString(node, data, op1Regs.payloadGPR());
        noResult(node, UseChildrenCalledExplicitly);
        break;
    }

    default:
        RELEASE_ASSERT_NOT_REACHED();
        break;
    }
}
12425
12426void SpeculativeJIT::emitSwitch(Node* node)
12427{
12428 SwitchData* data = node->switchData();
12429 switch (data->kind) {
12430 case SwitchImm: {
12431 emitSwitchImm(node, data);
12432 return;
12433 }
12434 case SwitchChar: {
12435 emitSwitchChar(node, data);
12436 return;
12437 }
12438 case SwitchString: {
12439 emitSwitchString(node, data);
12440 return;
12441 }
12442 case SwitchCell: {
12443 DFG_CRASH(m_jit.graph(), node, "Bad switch kind");
12444 return;
12445 } }
12446 RELEASE_ASSERT_NOT_REACHED();
12447}
12448
12449void SpeculativeJIT::addBranch(const MacroAssembler::JumpList& jump, BasicBlock* destination)
12450{
12451 for (unsigned i = jump.jumps().size(); i--;)
12452 addBranch(jump.jumps()[i], destination);
12453}
12454
12455void SpeculativeJIT::linkBranches()
12456{
12457 for (auto& branch : m_branches)
12458 branch.jump.linkTo(m_jit.blockHeads()[branch.destination->index], &m_jit);
12459}
12460
void SpeculativeJIT::compileStoreBarrier(Node* node)
{
    // Emits a GC write barrier for `base`. The inline fast path skips the runtime
    // call when the barrier check says no work is needed.
    ASSERT(node->op() == StoreBarrier || node->op() == FencedStoreBarrier);

    bool isFenced = node->op() == FencedStoreBarrier;

    SpeculateCellOperand base(this, node->child1());
    GPRTemporary scratch1(this);

    GPRReg baseGPR = base.gpr();
    GPRReg scratch1GPR = scratch1.gpr();

    JITCompiler::JumpList ok;

    if (isFenced) {
        // First check with the fenced variant of the barrier test.
        ok.append(m_jit.barrierBranch(vm(), baseGPR, scratch1GPR));

        // If the mutator currently needs fencing, issue a full fence and re-test
        // with the unfenced check; otherwise fall straight through to the slow path.
        JITCompiler::Jump noFence = m_jit.jumpIfMutatorFenceNotNeeded(vm());
        m_jit.memoryFence();
        ok.append(m_jit.barrierBranchWithoutFence(baseGPR));
        noFence.link(&m_jit);
    } else
        ok.append(m_jit.barrierBranchWithoutFence(baseGPR));

    // Slow path: notify the GC about the store into `base`.
    silentSpillAllRegisters(InvalidGPRReg);
    callOperation(operationWriteBarrierSlowPath, &vm(), baseGPR);
    silentFillAllRegisters();

    ok.link(&m_jit);

    noResult(node);
}
12493
12494void SpeculativeJIT::compilePutAccessorById(Node* node)
12495{
12496 SpeculateCellOperand base(this, node->child1());
12497 SpeculateCellOperand accessor(this, node->child2());
12498
12499 GPRReg baseGPR = base.gpr();
12500 GPRReg accessorGPR = accessor.gpr();
12501
12502 flushRegisters();
12503 callOperation(node->op() == PutGetterById ? operationPutGetterById : operationPutSetterById, TrustedImmPtr::weakPointer(m_graph, m_graph.globalObjectFor(node->origin.semantic)), baseGPR, identifierUID(node->identifierNumber()), node->accessorAttributes(), accessorGPR);
12504 m_jit.exceptionCheck();
12505
12506 noResult(node);
12507}
12508
void SpeculativeJIT::compilePutGetterSetterById(Node* node)
{
    // Installs a getter/setter pair for a named property via a runtime call.
    SpeculateCellOperand base(this, node->child1());
    JSValueOperand getter(this, node->child2());
    JSValueOperand setter(this, node->child3());

#if USE(JSVALUE64)
    // On 64-bit, a JSValue fits in one GPR, so the values are passed directly.
    GPRReg baseGPR = base.gpr();
    GPRReg getterGPR = getter.gpr();
    GPRReg setterGPR = setter.gpr();

    flushRegisters();
    callOperation(operationPutGetterSetter, TrustedImmPtr::weakPointer(m_graph, m_graph.globalObjectFor(node->origin.semantic)), baseGPR, identifierUID(node->identifierNumber()), node->accessorAttributes(), getterGPR, setterGPR);
#else
    // These JSValues may be JSUndefined OR JSFunction*.
    // At that time,
    // 1. If the JSValue is JSUndefined, its payload becomes nullptr.
    // 2. If the JSValue is JSFunction*, its payload becomes JSFunction*.
    // So extract payload and pass it to operationPutGetterSetter. This hack is used as the same way in baseline JIT.
    GPRReg baseGPR = base.gpr();
    JSValueRegs getterRegs = getter.jsValueRegs();
    JSValueRegs setterRegs = setter.jsValueRegs();

    flushRegisters();
    callOperation(operationPutGetterSetter, TrustedImmPtr::weakPointer(m_graph, m_graph.globalObjectFor(node->origin.semantic)), baseGPR, identifierUID(node->identifierNumber()), node->accessorAttributes(), getterRegs.payloadGPR(), setterRegs.payloadGPR());
#endif
    m_jit.exceptionCheck();

    noResult(node);
}
12539
12540void SpeculativeJIT::compileResolveScope(Node* node)
12541{
12542 SpeculateCellOperand scope(this, node->child1());
12543 GPRReg scopeGPR = scope.gpr();
12544 GPRFlushedCallResult result(this);
12545 GPRReg resultGPR = result.gpr();
12546 flushRegisters();
12547 callOperation(operationResolveScope, resultGPR, TrustedImmPtr::weakPointer(m_graph, m_graph.globalObjectFor(node->origin.semantic)), scopeGPR, identifierUID(node->identifierNumber()));
12548 m_jit.exceptionCheck();
12549 cellResult(resultGPR, node);
12550}
12551
12552void SpeculativeJIT::compileResolveScopeForHoistingFuncDeclInEval(Node* node)
12553{
12554 SpeculateCellOperand scope(this, node->child1());
12555 GPRReg scopeGPR = scope.gpr();
12556 flushRegisters();
12557 JSValueRegsFlushedCallResult result(this);
12558 JSValueRegs resultRegs = result.regs();
12559 callOperation(operationResolveScopeForHoistingFuncDeclInEval, resultRegs, TrustedImmPtr::weakPointer(m_graph, m_graph.globalObjectFor(node->origin.semantic)), scopeGPR, identifierUID(node->identifierNumber()));
12560 m_jit.exceptionCheck();
12561 jsValueResult(resultRegs, node);
12562}
12563
12564void SpeculativeJIT::compileGetGlobalVariable(Node* node)
12565{
12566 JSValueRegsTemporary result(this);
12567 JSValueRegs resultRegs = result.regs();
12568 m_jit.loadValue(node->variablePointer(), resultRegs);
12569 jsValueResult(resultRegs, node);
12570}
12571
12572void SpeculativeJIT::compilePutGlobalVariable(Node* node)
12573{
12574 JSValueOperand value(this, node->child2());
12575 JSValueRegs valueRegs = value.jsValueRegs();
12576 m_jit.storeValue(valueRegs, node->variablePointer());
12577 noResult(node);
12578}
12579
12580void SpeculativeJIT::compileGetDynamicVar(Node* node)
12581{
12582 SpeculateCellOperand scope(this, node->child1());
12583 GPRReg scopeGPR = scope.gpr();
12584 flushRegisters();
12585 JSValueRegsFlushedCallResult result(this);
12586 JSValueRegs resultRegs = result.regs();
12587 callOperation(operationGetDynamicVar, resultRegs, TrustedImmPtr::weakPointer(m_graph, m_graph.globalObjectFor(node->origin.semantic)), scopeGPR, identifierUID(node->identifierNumber()), node->getPutInfo());
12588 m_jit.exceptionCheck();
12589 jsValueResult(resultRegs, node);
12590}
12591
12592void SpeculativeJIT::compilePutDynamicVar(Node* node)
12593{
12594 SpeculateCellOperand scope(this, node->child1());
12595 JSValueOperand value(this, node->child2());
12596
12597 GPRReg scopeGPR = scope.gpr();
12598 JSValueRegs valueRegs = value.jsValueRegs();
12599
12600 flushRegisters();
12601 callOperation(node->ecmaMode().isStrict() ? operationPutDynamicVarStrict : operationPutDynamicVarNonStrict, TrustedImmPtr::weakPointer(m_graph, m_graph.globalObjectFor(node->origin.semantic)), scopeGPR, valueRegs, identifierUID(node->identifierNumber()), node->getPutInfo());
12602 m_jit.exceptionCheck();
12603 noResult(node);
12604}
12605
12606void SpeculativeJIT::compileGetClosureVar(Node* node)
12607{
12608 SpeculateCellOperand base(this, node->child1());
12609 JSValueRegsTemporary result(this);
12610
12611 GPRReg baseGPR = base.gpr();
12612 JSValueRegs resultRegs = result.regs();
12613
12614 m_jit.loadValue(JITCompiler::Address(baseGPR, JSLexicalEnvironment::offsetOfVariable(node->scopeOffset())), resultRegs);
12615 jsValueResult(resultRegs, node);
12616}
12617
12618void SpeculativeJIT::compilePutClosureVar(Node* node)
12619{
12620 SpeculateCellOperand base(this, node->child1());
12621 JSValueOperand value(this, node->child2());
12622
12623 GPRReg baseGPR = base.gpr();
12624 JSValueRegs valueRegs = value.jsValueRegs();
12625
12626 m_jit.storeValue(valueRegs, JITCompiler::Address(baseGPR, JSLexicalEnvironment::offsetOfVariable(node->scopeOffset())));
12627 noResult(node);
12628}
12629
12630void SpeculativeJIT::compileGetInternalField(Node* node)
12631{
12632 SpeculateCellOperand base(this, node->child1());
12633 JSValueRegsTemporary result(this);
12634
12635 GPRReg baseGPR = base.gpr();
12636 JSValueRegs resultRegs = result.regs();
12637
12638 m_jit.loadValue(JITCompiler::Address(baseGPR, JSInternalFieldObjectImpl<>::offsetOfInternalField(node->internalFieldIndex())), resultRegs);
12639 jsValueResult(resultRegs, node);
12640}
12641
12642void SpeculativeJIT::compilePutInternalField(Node* node)
12643{
12644 SpeculateCellOperand base(this, node->child1());
12645 JSValueOperand value(this, node->child2());
12646
12647 GPRReg baseGPR = base.gpr();
12648 JSValueRegs valueRegs = value.jsValueRegs();
12649
12650 m_jit.storeValue(valueRegs, JITCompiler::Address(baseGPR, JSInternalFieldObjectImpl<>::offsetOfInternalField(node->internalFieldIndex())));
12651 noResult(node);
12652}
12653
12654void SpeculativeJIT::compilePutAccessorByVal(Node* node)
12655{
12656 SpeculateCellOperand base(this, node->child1());
12657 JSValueOperand subscript(this, node->child2());
12658 SpeculateCellOperand accessor(this, node->child3());
12659
12660 auto operation = node->op() == PutGetterByVal ? operationPutGetterByVal : operationPutSetterByVal;
12661
12662 GPRReg baseGPR = base.gpr();
12663 JSValueRegs subscriptRegs = subscript.jsValueRegs();
12664 GPRReg accessorGPR = accessor.gpr();
12665
12666 flushRegisters();
12667 callOperation(operation, TrustedImmPtr::weakPointer(m_graph, m_graph.globalObjectFor(node->origin.semantic)), baseGPR, subscriptRegs, node->accessorAttributes(), accessorGPR);
12668 m_jit.exceptionCheck();
12669
12670 noResult(node);
12671}
12672
12673void SpeculativeJIT::compileGetRegExpObjectLastIndex(Node* node)
12674{
12675 SpeculateCellOperand regExp(this, node->child1());
12676 JSValueRegsTemporary result(this);
12677 GPRReg regExpGPR = regExp.gpr();
12678 JSValueRegs resultRegs = result.regs();
12679 speculateRegExpObject(node->child1(), regExpGPR);
12680 m_jit.loadValue(JITCompiler::Address(regExpGPR, RegExpObject::offsetOfLastIndex()), resultRegs);
12681 jsValueResult(resultRegs, node);
12682}
12683
void SpeculativeJIT::compileSetRegExpObjectLastIndex(Node* node)
{
    // Stores into a RegExpObject's lastIndex slot. Unless the graph proved
    // writability can be ignored, speculates that the object is a RegExpObject
    // whose lastIndex is still writable, OSR-exiting (ExoticObjectMode) otherwise.
    SpeculateCellOperand regExp(this, node->child1());
    JSValueOperand value(this, node->child2());
    GPRReg regExpGPR = regExp.gpr();
    JSValueRegs valueRegs = value.jsValueRegs();

    if (!node->ignoreLastIndexIsWritable()) {
        speculateRegExpObject(node->child1(), regExpGPR);
        // The not-writable flag is packed into the regExp pointer word; test it.
        speculationCheck(
            ExoticObjectMode, JSValueRegs(), nullptr,
            m_jit.branchTestPtr(
                JITCompiler::NonZero,
                JITCompiler::Address(regExpGPR, RegExpObject::offsetOfRegExpAndLastIndexIsNotWritableFlag()),
                JITCompiler::TrustedImm32(RegExpObject::lastIndexIsNotWritableFlag)));
    }

    m_jit.storeValue(valueRegs, JITCompiler::Address(regExpGPR, RegExpObject::offsetOfLastIndex()));
    noResult(node);
}
12704
void SpeculativeJIT::compileRegExpExec(Node* node)
{
    // Compiles RegExpExec with three tiers of specialization, each a runtime call:
    // (RegExpObject, String), (RegExpObject, untyped), and fully generic.
    // Flip `sample` in a debug build to bracket the node with super-sampler counts.
    bool sample = false;
    if (sample)
        m_jit.incrementSuperSamplerCount();

    SpeculateCellOperand globalObject(this, node->child1());
    GPRReg globalObjectGPR = globalObject.gpr();

    if (node->child2().useKind() == RegExpObjectUse) {
        if (node->child3().useKind() == StringUse) {
            // Most specialized: both base and argument are speculated cells.
            SpeculateCellOperand base(this, node->child2());
            SpeculateCellOperand argument(this, node->child3());
            GPRReg baseGPR = base.gpr();
            GPRReg argumentGPR = argument.gpr();
            speculateRegExpObject(node->child2(), baseGPR);
            speculateString(node->child3(), argumentGPR);

            flushRegisters();
            JSValueRegsFlushedCallResult result(this);
            JSValueRegs resultRegs = result.regs();
            callOperation(operationRegExpExecString, resultRegs, globalObjectGPR, baseGPR, argumentGPR);
            m_jit.exceptionCheck();

            jsValueResult(resultRegs, node);

            if (sample)
                m_jit.decrementSuperSamplerCount();
            return;
        }

        // RegExpObject base, arbitrary argument value.
        SpeculateCellOperand base(this, node->child2());
        JSValueOperand argument(this, node->child3());
        GPRReg baseGPR = base.gpr();
        JSValueRegs argumentRegs = argument.jsValueRegs();
        speculateRegExpObject(node->child2(), baseGPR);

        flushRegisters();
        JSValueRegsFlushedCallResult result(this);
        JSValueRegs resultRegs = result.regs();
        callOperation(operationRegExpExec, resultRegs, globalObjectGPR, baseGPR, argumentRegs);
        m_jit.exceptionCheck();

        jsValueResult(resultRegs, node);

        if (sample)
            m_jit.decrementSuperSamplerCount();
        return;
    }

    // Fully generic: both base and argument are arbitrary values.
    JSValueOperand base(this, node->child2());
    JSValueOperand argument(this, node->child3());
    JSValueRegs baseRegs = base.jsValueRegs();
    JSValueRegs argumentRegs = argument.jsValueRegs();

    flushRegisters();
    JSValueRegsFlushedCallResult result(this);
    JSValueRegs resultRegs = result.regs();
    callOperation(operationRegExpExecGeneric, resultRegs, globalObjectGPR, baseRegs, argumentRegs);
    m_jit.exceptionCheck();

    jsValueResult(resultRegs, node);

    if (sample)
        m_jit.decrementSuperSamplerCount();
}
12771
// Compiles the RegExpTest node. Same three-tier dispatch as compileRegExpExec
// but the result is an unboxed boolean:
//   (RegExpObject, String)  -> operationRegExpTestString
//   (RegExpObject, Untyped) -> operationRegExpTest
//   (Untyped, Untyped)      -> operationRegExpTestGeneric
void SpeculativeJIT::compileRegExpTest(Node* node)
{
    SpeculateCellOperand globalObject(this, node->child1());
    GPRReg globalObjectGPR = globalObject.gpr();

    if (node->child2().useKind() == RegExpObjectUse) {
        if (node->child3().useKind() == StringUse) {
            // Fastest tier: both base and argument are known cells.
            SpeculateCellOperand base(this, node->child2());
            SpeculateCellOperand argument(this, node->child3());
            GPRReg baseGPR = base.gpr();
            GPRReg argumentGPR = argument.gpr();
            speculateRegExpObject(node->child2(), baseGPR);
            speculateString(node->child3(), argumentGPR);

            flushRegisters();
            GPRFlushedCallResult result(this);
            callOperation(operationRegExpTestString, result.gpr(), globalObjectGPR, baseGPR, argumentGPR);
            m_jit.exceptionCheck();

            unblessedBooleanResult(result.gpr(), node);
            return;
        }

        // Middle tier: RegExpObject base, untyped argument.
        SpeculateCellOperand base(this, node->child2());
        JSValueOperand argument(this, node->child3());
        GPRReg baseGPR = base.gpr();
        JSValueRegs argumentRegs = argument.jsValueRegs();
        speculateRegExpObject(node->child2(), baseGPR);

        flushRegisters();
        GPRFlushedCallResult result(this);
        callOperation(operationRegExpTest, result.gpr(), globalObjectGPR, baseGPR, argumentRegs);
        m_jit.exceptionCheck();

        unblessedBooleanResult(result.gpr(), node);
        return;
    }

    // Fully generic tier.
    JSValueOperand base(this, node->child2());
    JSValueOperand argument(this, node->child3());
    JSValueRegs baseRegs = base.jsValueRegs();
    JSValueRegs argumentRegs = argument.jsValueRegs();

    flushRegisters();
    GPRFlushedCallResult result(this);
    callOperation(operationRegExpTestGeneric, result.gpr(), globalObjectGPR, baseRegs, argumentRegs);
    m_jit.exceptionCheck();

    unblessedBooleanResult(result.gpr(), node);
}
12822
// Compiles StringReplace / StringReplaceRegExp. When the operands speculate
// as (String, RegExpObject, String) there are two fast paths:
//   - the replacement is a compile-time-constant empty string
//     -> operationStringProtoFuncReplaceRegExpEmptyStr
//   - otherwise -> operationStringProtoFuncReplaceRegExpString
// Anything else falls back to operationStringProtoFuncReplaceGeneric.
void SpeculativeJIT::compileStringReplace(Node* node)
{
    ASSERT(node->op() == StringReplace || node->op() == StringReplaceRegExp);
    // Flip to true locally when profiling to bracket the operation calls with
    // the super sampler.
    bool sample = false;
    if (sample)
        m_jit.incrementSuperSamplerCount();

    if (node->child1().useKind() == StringUse
        && node->child2().useKind() == RegExpObjectUse
        && node->child3().useKind() == StringUse) {
        if (JSString* replace = node->child3()->dynamicCastConstant<JSString*>(vm())) {
            if (!replace->length()) {
                // Constant empty replacement string: the operation never needs
                // to look at the replacement, so don't even load it.
                SpeculateCellOperand string(this, node->child1());
                SpeculateCellOperand regExp(this, node->child2());
                GPRReg stringGPR = string.gpr();
                GPRReg regExpGPR = regExp.gpr();
                speculateString(node->child1(), stringGPR);
                speculateRegExpObject(node->child2(), regExpGPR);

                flushRegisters();
                GPRFlushedCallResult result(this);
                callOperation(operationStringProtoFuncReplaceRegExpEmptyStr, result.gpr(), TrustedImmPtr::weakPointer(m_graph, m_graph.globalObjectFor(node->origin.semantic)), stringGPR, regExpGPR);
                m_jit.exceptionCheck();
                cellResult(result.gpr(), node);
                if (sample)
                    m_jit.decrementSuperSamplerCount();
                return;
            }
        }

        // Speculated (String, RegExpObject, String) but the replacement is
        // not a known empty constant.
        SpeculateCellOperand string(this, node->child1());
        SpeculateCellOperand regExp(this, node->child2());
        SpeculateCellOperand replace(this, node->child3());
        GPRReg stringGPR = string.gpr();
        GPRReg regExpGPR = regExp.gpr();
        GPRReg replaceGPR = replace.gpr();
        speculateString(node->child1(), stringGPR);
        speculateRegExpObject(node->child2(), regExpGPR);
        speculateString(node->child3(), replaceGPR);

        flushRegisters();
        GPRFlushedCallResult result(this);
        callOperation(operationStringProtoFuncReplaceRegExpString, result.gpr(), TrustedImmPtr::weakPointer(m_graph, m_graph.globalObjectFor(node->origin.semantic)), stringGPR, regExpGPR, replaceGPR);
        m_jit.exceptionCheck();
        cellResult(result.gpr(), node);
        if (sample)
            m_jit.decrementSuperSamplerCount();
        return;
    }

    // If we fixed up the edge of child2, we inserted a Check(@child2, String).
    OperandSpeculationMode child2SpeculationMode = AutomaticOperandSpeculation;
    if (node->child2().useKind() == StringUse)
        child2SpeculationMode = ManualOperandSpeculation;

    // Generic path: all three operands as untyped JSValues.
    JSValueOperand string(this, node->child1());
    JSValueOperand search(this, node->child2(), child2SpeculationMode);
    JSValueOperand replace(this, node->child3());
    JSValueRegs stringRegs = string.jsValueRegs();
    JSValueRegs searchRegs = search.jsValueRegs();
    JSValueRegs replaceRegs = replace.jsValueRegs();

    flushRegisters();
    GPRFlushedCallResult result(this);
    callOperation(operationStringProtoFuncReplaceGeneric, result.gpr(), TrustedImmPtr::weakPointer(m_graph, m_graph.globalObjectFor(node->origin.semantic)), stringRegs, searchRegs, replaceRegs);
    m_jit.exceptionCheck();
    cellResult(result.gpr(), node);
    if (sample)
        m_jit.decrementSuperSamplerCount();
}
12893
12894void SpeculativeJIT::compileRegExpExecNonGlobalOrSticky(Node* node)
12895{
12896 SpeculateCellOperand globalObject(this, node->child1());
12897 SpeculateCellOperand argument(this, node->child2());
12898 GPRReg globalObjectGPR = globalObject.gpr();
12899 GPRReg argumentGPR = argument.gpr();
12900
12901 speculateString(node->child2(), argumentGPR);
12902
12903 flushRegisters();
12904 JSValueRegsFlushedCallResult result(this);
12905 JSValueRegs resultRegs = result.regs();
12906 callOperation(
12907 operationRegExpExecNonGlobalOrSticky, resultRegs,
12908 globalObjectGPR, TrustedImmPtr(node->cellOperand()), argumentGPR);
12909 m_jit.exceptionCheck();
12910
12911 jsValueResult(resultRegs, node);
12912}
12913
12914void SpeculativeJIT::compileRegExpMatchFastGlobal(Node* node)
12915{
12916 SpeculateCellOperand globalObject(this, node->child1());
12917 SpeculateCellOperand argument(this, node->child2());
12918 GPRReg globalObjectGPR = globalObject.gpr();
12919 GPRReg argumentGPR = argument.gpr();
12920
12921 speculateString(node->child2(), argumentGPR);
12922
12923 flushRegisters();
12924 JSValueRegsFlushedCallResult result(this);
12925 JSValueRegs resultRegs = result.regs();
12926 callOperation(
12927 operationRegExpMatchFastGlobalString, resultRegs,
12928 globalObjectGPR, TrustedImmPtr(node->cellOperand()), argumentGPR);
12929 m_jit.exceptionCheck();
12930
12931 jsValueResult(resultRegs, node);
12932}
12933
12934void SpeculativeJIT::compileRegExpMatchFast(Node* node)
12935{
12936 SpeculateCellOperand globalObject(this, node->child1());
12937 SpeculateCellOperand base(this, node->child2());
12938 SpeculateCellOperand argument(this, node->child3());
12939 GPRReg globalObjectGPR = globalObject.gpr();
12940 GPRReg baseGPR = base.gpr();
12941 GPRReg argumentGPR = argument.gpr();
12942 speculateRegExpObject(node->child2(), baseGPR);
12943 speculateString(node->child3(), argumentGPR);
12944
12945 flushRegisters();
12946 JSValueRegsFlushedCallResult result(this);
12947 JSValueRegs resultRegs = result.regs();
12948 callOperation(
12949 operationRegExpMatchFastString, resultRegs,
12950 globalObjectGPR, baseGPR, argumentGPR);
12951 m_jit.exceptionCheck();
12952
12953 jsValueResult(resultRegs, node);
12954}
12955
12956void SpeculativeJIT::compileLazyJSConstant(Node* node)
12957{
12958 JSValueRegsTemporary result(this);
12959 JSValueRegs resultRegs = result.regs();
12960 node->lazyJSValue().emit(m_jit, resultRegs);
12961 jsValueResult(resultRegs, node);
12962}
12963
// Compiles MaterializeNewObject: rebuilds an object that was sunk by object
// allocation sinking. The structure is the single element of the node's
// structure set; the var-arg children carry the promoted property values.
void SpeculativeJIT::compileMaterializeNewObject(Node* node)
{
    RegisteredStructure structure = node->structureSet().at(0);
    ASSERT(m_jit.graph().varArgChild(node, 0)->dynamicCastConstant<Structure*>(vm()) == structure.get());

    ObjectMaterializationData& data = node->objectMaterializationData();

    IndexingType indexingType = structure->indexingType();
    bool hasIndexingHeader = hasIndexedProperties(indexingType);
    int32_t publicLength = 0;
    int32_t vectorLength = 0;

    if (hasIndexingHeader) {
        // Pull the constant public/vector lengths out of the promoted
        // locations so we can size the butterfly allocation below.
        for (unsigned i = data.m_properties.size(); i--;) {
            Edge edge = m_jit.graph().varArgChild(node, 1 + i);
            switch (data.m_properties[i].kind()) {
            case PublicLengthPLoc:
                publicLength = edge->asInt32();
                break;
            case VectorLengthPLoc:
                vectorLength = edge->asInt32();
                break;
            default:
                break;
            }
        }
    }

    GPRTemporary result(this);
    GPRTemporary storage(this);
    GPRReg resultGPR = result.gpr();
    GPRReg storageGPR = storage.gpr();

    emitAllocateRawObject(resultGPR, structure, storageGPR, 0, vectorLength);

    // After the allocation, we must not exit until we fill butterfly completely.

    m_jit.store32(
        JITCompiler::TrustedImm32(publicLength),
        JITCompiler::Address(storageGPR, Butterfly::offsetOfPublicLength()));

    // Fill in every promoted property: indexed values go into the butterfly
    // vector; named values go to their structure-assigned offset, either
    // inline (in the object) or out-of-line (in the butterfly).
    for (unsigned i = data.m_properties.size(); i--;) {
        Edge edge = m_jit.graph().varArgChild(node, 1 + i);
        PromotedLocationDescriptor descriptor = data.m_properties[i];
        switch (descriptor.kind()) {
        case IndexedPropertyPLoc: {
            JSValueOperand value(this, edge);
            m_jit.storeValue(
                value.jsValueRegs(),
                JITCompiler::Address(storageGPR, sizeof(EncodedJSValue) * descriptor.info()));
            break;
        }

        case NamedPropertyPLoc: {
            // Look up the property's offset by matching its identifier against
            // the structure's property table.
            StringImpl* uid = m_jit.graph().identifiers()[descriptor.info()];
            for (PropertyMapEntry entry : structure->getPropertiesConcurrently()) {
                if (uid != entry.key)
                    continue;

                JSValueOperand value(this, edge);
                GPRReg baseGPR = isInlineOffset(entry.offset) ? resultGPR : storageGPR;
                m_jit.storeValue(
                    value.jsValueRegs(),
                    JITCompiler::Address(baseGPR, offsetRelativeToBase(entry.offset)));
            }
            break;
        }

        default:
            break;
        }
    }

    cellResult(resultGPR, node);
}
13039
13040void SpeculativeJIT::compileRecordRegExpCachedResult(Node* node)
13041{
13042 Edge globalObjectEdge = m_jit.graph().varArgChild(node, 0);
13043 Edge regExpEdge = m_jit.graph().varArgChild(node, 1);
13044 Edge stringEdge = m_jit.graph().varArgChild(node, 2);
13045 Edge startEdge = m_jit.graph().varArgChild(node, 3);
13046 Edge endEdge = m_jit.graph().varArgChild(node, 4);
13047
13048 SpeculateCellOperand globalObject(this, globalObjectEdge);
13049 SpeculateCellOperand regExp(this, regExpEdge);
13050 SpeculateCellOperand string(this, stringEdge);
13051 SpeculateInt32Operand start(this, startEdge);
13052 SpeculateInt32Operand end(this, endEdge);
13053
13054 GPRReg globalObjectGPR = globalObject.gpr();
13055 GPRReg regExpGPR = regExp.gpr();
13056 GPRReg stringGPR = string.gpr();
13057 GPRReg startGPR = start.gpr();
13058 GPRReg endGPR = end.gpr();
13059
13060 ptrdiff_t offset = JSGlobalObject::regExpGlobalDataOffset() + RegExpGlobalData::offsetOfCachedResult();
13061
13062 m_jit.storePtr(
13063 regExpGPR,
13064 JITCompiler::Address(globalObjectGPR, offset + RegExpCachedResult::offsetOfLastRegExp()));
13065 m_jit.storePtr(
13066 stringGPR,
13067 JITCompiler::Address(globalObjectGPR, offset + RegExpCachedResult::offsetOfLastInput()));
13068 m_jit.store32(
13069 startGPR,
13070 JITCompiler::Address(
13071 globalObjectGPR,
13072 offset + RegExpCachedResult::offsetOfResult() + OBJECT_OFFSETOF(MatchResult, start)));
13073 m_jit.store32(
13074 endGPR,
13075 JITCompiler::Address(
13076 globalObjectGPR,
13077 offset + RegExpCachedResult::offsetOfResult() + OBJECT_OFFSETOF(MatchResult, end)));
13078 m_jit.store8(
13079 TrustedImm32(0),
13080 JITCompiler::Address(globalObjectGPR, offset + RegExpCachedResult::offsetOfReified()));
13081
13082 noResult(node);
13083}
13084
// Compiles DefineDataProperty (Object.defineProperty with a data descriptor).
// Var-arg children: 0 = base object, 1 = property key, 2 = value,
// 3 = attributes (int32). Dispatches on the property key's use kind to the
// matching C operation. Children are consumed explicitly (useChildren) before
// the flush, and noResult is told so at the end.
void SpeculativeJIT::compileDefineDataProperty(Node* node)
{
#if USE(JSVALUE64)
    static_assert(GPRInfo::numberOfRegisters >= 5, "We are assuming we have enough registers to make this call without incrementally setting up the arguments.");
#else
    static_assert(GPRInfo::numberOfRegisters >= 6, "We are assuming we have enough registers to make this call without incrementally setting up the arguments.");
#endif

    SpeculateCellOperand base(this, m_jit.graph().varArgChild(node, 0));
    GPRReg baseGPR = base.gpr();

    JSValueOperand value(this, m_jit.graph().varArgChild(node, 2));
    JSValueRegs valueRegs = value.jsValueRegs();

    SpeculateInt32Operand attributes(this, m_jit.graph().varArgChild(node, 3));
    GPRReg attributesGPR = attributes.gpr();

    Edge& propertyEdge = m_jit.graph().varArgChild(node, 1);
    switch (propertyEdge.useKind()) {
    case StringUse: {
        SpeculateCellOperand property(this, propertyEdge);
        GPRReg propertyGPR = property.gpr();
        speculateString(propertyEdge, propertyGPR);

        useChildren(node);

        flushRegisters();
        callOperation(operationDefineDataPropertyString, TrustedImmPtr::weakPointer(m_graph, m_graph.globalObjectFor(node->origin.semantic)), baseGPR, propertyGPR, valueRegs, attributesGPR);
        m_jit.exceptionCheck();
        break;
    }
    case StringIdentUse: {
        // The key is a string with an atomized impl; pass the StringImpl*
        // (loaded into identGPR) instead of the JSString.
        SpeculateCellOperand property(this, propertyEdge);
        GPRTemporary ident(this);

        GPRReg propertyGPR = property.gpr();
        GPRReg identGPR = ident.gpr();

        speculateString(propertyEdge, propertyGPR);
        speculateStringIdentAndLoadStorage(propertyEdge, propertyGPR, identGPR);

        useChildren(node);

        flushRegisters();
        callOperation(operationDefineDataPropertyStringIdent, TrustedImmPtr::weakPointer(m_graph, m_graph.globalObjectFor(node->origin.semantic)), baseGPR, identGPR, valueRegs, attributesGPR);
        m_jit.exceptionCheck();
        break;
    }
    case SymbolUse: {
        SpeculateCellOperand property(this, propertyEdge);
        GPRReg propertyGPR = property.gpr();
        speculateSymbol(propertyEdge, propertyGPR);

        useChildren(node);

        flushRegisters();
        callOperation(operationDefineDataPropertySymbol, TrustedImmPtr::weakPointer(m_graph, m_graph.globalObjectFor(node->origin.semantic)), baseGPR, propertyGPR, valueRegs, attributesGPR);
        m_jit.exceptionCheck();
        break;
    }
    case UntypedUse: {
        JSValueOperand property(this, propertyEdge);
        JSValueRegs propertyRegs = property.jsValueRegs();

        useChildren(node);

        flushRegisters();
        callOperation(operationDefineDataProperty, TrustedImmPtr::weakPointer(m_graph, m_graph.globalObjectFor(node->origin.semantic)), baseGPR, propertyRegs, valueRegs, attributesGPR);
        m_jit.exceptionCheck();
        break;
    }
    default:
        RELEASE_ASSERT_NOT_REACHED();
    }

    noResult(node, UseChildrenCalledExplicitly);
}
13162
// Compiles DefineAccessorProperty (Object.defineProperty with an accessor
// descriptor). Var-arg children: 0 = base object, 1 = property key,
// 2 = getter, 3 = setter, 4 = attributes (int32). Mirrors
// compileDefineDataProperty: dispatch on the key's use kind, consume children
// explicitly before the flush.
void SpeculativeJIT::compileDefineAccessorProperty(Node* node)
{
#if USE(JSVALUE64)
    static_assert(GPRInfo::numberOfRegisters >= 5, "We are assuming we have enough registers to make this call without incrementally setting up the arguments.");
#else
    static_assert(GPRInfo::numberOfRegisters >= 6, "We are assuming we have enough registers to make this call without incrementally setting up the arguments.");
#endif

    SpeculateCellOperand base(this, m_jit.graph().varArgChild(node, 0));
    GPRReg baseGPR = base.gpr();

    SpeculateCellOperand getter(this, m_jit.graph().varArgChild(node, 2));
    GPRReg getterGPR = getter.gpr();

    SpeculateCellOperand setter(this, m_jit.graph().varArgChild(node, 3));
    GPRReg setterGPR = setter.gpr();

    SpeculateInt32Operand attributes(this, m_jit.graph().varArgChild(node, 4));
    GPRReg attributesGPR = attributes.gpr();

    Edge& propertyEdge = m_jit.graph().varArgChild(node, 1);
    switch (propertyEdge.useKind()) {
    case StringUse: {
        SpeculateCellOperand property(this, propertyEdge);
        GPRReg propertyGPR = property.gpr();
        speculateString(propertyEdge, propertyGPR);

        useChildren(node);

        flushRegisters();
        callOperation(operationDefineAccessorPropertyString, TrustedImmPtr::weakPointer(m_graph, m_graph.globalObjectFor(node->origin.semantic)), baseGPR, propertyGPR, getterGPR, setterGPR, attributesGPR);
        m_jit.exceptionCheck();
        break;
    }
    case StringIdentUse: {
        // The key is a string with an atomized impl; pass the StringImpl*
        // (loaded into identGPR) instead of the JSString.
        SpeculateCellOperand property(this, propertyEdge);
        GPRTemporary ident(this);

        GPRReg propertyGPR = property.gpr();
        GPRReg identGPR = ident.gpr();

        speculateString(propertyEdge, propertyGPR);
        speculateStringIdentAndLoadStorage(propertyEdge, propertyGPR, identGPR);

        useChildren(node);

        flushRegisters();
        callOperation(operationDefineAccessorPropertyStringIdent, TrustedImmPtr::weakPointer(m_graph, m_graph.globalObjectFor(node->origin.semantic)), baseGPR, identGPR, getterGPR, setterGPR, attributesGPR);
        m_jit.exceptionCheck();
        break;
    }
    case SymbolUse: {
        SpeculateCellOperand property(this, propertyEdge);
        GPRReg propertyGPR = property.gpr();
        speculateSymbol(propertyEdge, propertyGPR);

        useChildren(node);

        flushRegisters();
        callOperation(operationDefineAccessorPropertySymbol, TrustedImmPtr::weakPointer(m_graph, m_graph.globalObjectFor(node->origin.semantic)), baseGPR, propertyGPR, getterGPR, setterGPR, attributesGPR);
        m_jit.exceptionCheck();
        break;
    }
    case UntypedUse: {
        JSValueOperand property(this, propertyEdge);
        JSValueRegs propertyRegs = property.jsValueRegs();

        useChildren(node);

        flushRegisters();
        callOperation(operationDefineAccessorProperty, TrustedImmPtr::weakPointer(m_graph, m_graph.globalObjectFor(node->origin.semantic)), baseGPR, propertyRegs, getterGPR, setterGPR, attributesGPR);
        m_jit.exceptionCheck();
        break;
    }
    default:
        RELEASE_ASSERT_NOT_REACHED();
    }

    noResult(node, UseChildrenCalledExplicitly);
}
13243
13244void SpeculativeJIT::emitAllocateButterfly(GPRReg storageResultGPR, GPRReg sizeGPR, GPRReg scratch1, GPRReg scratch2, GPRReg scratch3, MacroAssembler::JumpList& slowCases)
13245{
13246 RELEASE_ASSERT(RegisterSet(storageResultGPR, sizeGPR, scratch1, scratch2, scratch3).numberOfSetGPRs() == 5);
13247 ASSERT((1 << 3) == sizeof(JSValue));
13248 m_jit.zeroExtend32ToWord(sizeGPR, scratch1);
13249 m_jit.lshift32(TrustedImm32(3), scratch1);
13250 m_jit.add32(TrustedImm32(sizeof(IndexingHeader)), scratch1, scratch2);
13251#if ASSERT_ENABLED
13252 MacroAssembler::Jump didNotOverflow = m_jit.branch32(MacroAssembler::AboveOrEqual, scratch2, sizeGPR);
13253 m_jit.abortWithReason(UncheckedOverflow);
13254 didNotOverflow.link(&m_jit);
13255#endif
13256 m_jit.emitAllocateVariableSized(
13257 storageResultGPR, vm().jsValueGigacageAuxiliarySpace, scratch2, scratch1, scratch3, slowCases);
13258 m_jit.addPtr(TrustedImm32(sizeof(IndexingHeader)), storageResultGPR);
13259
13260 m_jit.store32(sizeGPR, MacroAssembler::Address(storageResultGPR, Butterfly::offsetOfPublicLength()));
13261 m_jit.store32(sizeGPR, MacroAssembler::Address(storageResultGPR, Butterfly::offsetOfVectorLength()));
13262}
13263
// Compiles NormalizeMapKey: canonicalizes a JSValue for use as a Map/Set key.
//   - Doubles with an exact int32 value are re-boxed as int32.
//   - Any NaN is replaced by the canonical jsNaN().
//   - Heap BigInts go to a slow-path call (operationNormalizeMapKeyHeapBigInt).
//   - Everything else (non-numbers, int32s, non-integral doubles) passes
//     through unchanged.
void SpeculativeJIT::compileNormalizeMapKey(Node* node)
{
    ASSERT(node->child1().useKind() == UntypedUse);
    JSValueOperand key(this, node->child1());
    JSValueRegsTemporary result(this, Reuse, key);
    GPRTemporary scratch(this);
    FPRTemporary doubleValue(this);
    FPRTemporary temp(this);

    JSValueRegs keyRegs = key.jsValueRegs();
    JSValueRegs resultRegs = result.regs();
    GPRReg scratchGPR = scratch.gpr();
    FPRReg doubleValueFPR = doubleValue.fpr();
    FPRReg tempFPR = temp.fpr();

    CCallHelpers::JumpList passThroughCases;
    CCallHelpers::JumpList doneCases;

    // Cells: heap bigints take the slow path; all other cells pass through.
    auto isNotCell = m_jit.branchIfNotCell(keyRegs);
    passThroughCases.append(m_jit.branchIfNotHeapBigInt(keyRegs.payloadGPR()));
    auto slowPath = m_jit.jump();
    isNotCell.link(&m_jit);

    // Non-cells: only doubles need normalization.
    passThroughCases.append(m_jit.branchIfNotNumber(keyRegs, scratchGPR));
    passThroughCases.append(m_jit.branchIfInt32(keyRegs));

#if USE(JSVALUE64)
    m_jit.unboxDoubleWithoutAssertions(keyRegs.gpr(), scratchGPR, doubleValueFPR);
#else
    unboxDouble(keyRegs.tagGPR(), keyRegs.payloadGPR(), doubleValueFPR, tempFPR);
#endif
    // Canonicalize any NaN bit pattern to the one produced by jsNaN().
    auto notNaN = m_jit.branchIfNotNaN(doubleValueFPR);
    m_jit.moveTrustedValue(jsNaN(), resultRegs);
    doneCases.append(m_jit.jump());

    notNaN.link(&m_jit);
    // If truncating to int32 and converting back reproduces the double
    // exactly, the key is an integral double and gets boxed as int32.
    m_jit.truncateDoubleToInt32(doubleValueFPR, scratchGPR);
    m_jit.convertInt32ToDouble(scratchGPR, tempFPR);
    passThroughCases.append(m_jit.branchDouble(JITCompiler::DoubleNotEqualAndOrdered, doubleValueFPR, tempFPR));

    m_jit.boxInt32(scratchGPR, resultRegs);
    doneCases.append(m_jit.jump());

    passThroughCases.link(&m_jit);
    m_jit.moveValueRegs(keyRegs, resultRegs);
    addSlowPathGenerator(slowPathCall(slowPath, this, operationNormalizeMapKeyHeapBigInt, NeedToSpill, ExceptionCheckRequirement::CheckNotNeeded, resultRegs, &vm(), keyRegs.payloadGPR()));

    doneCases.link(&m_jit);
    jsValueResult(resultRegs, node);
}
13314
// Compiles GetMapBucketHead: loads the head-bucket pointer out of a JSMap or
// JSSet. The head offset is identical for the key-only and key-value bucket
// layouts (asserted below), so one load serves both use kinds.
void SpeculativeJIT::compileGetMapBucketHead(Node* node)
{
    SpeculateCellOperand map(this, node->child1());
    GPRTemporary bucket(this);

    GPRReg mapGPR = map.gpr();
    GPRReg bucketGPR = bucket.gpr();

    if (node->child1().useKind() == MapObjectUse)
        speculateMapObject(node->child1(), mapGPR);
    else if (node->child1().useKind() == SetObjectUse)
        speculateSetObject(node->child1(), mapGPR);
    else
        RELEASE_ASSERT_NOT_REACHED();

    ASSERT(HashMapImpl<HashMapBucket<HashMapBucketDataKey>>::offsetOfHead() == HashMapImpl<HashMapBucket<HashMapBucketDataKeyValue>>::offsetOfHead());
    m_jit.loadPtr(MacroAssembler::Address(mapGPR, HashMapImpl<HashMapBucket<HashMapBucketDataKey>>::offsetOfHead()), bucketGPR);
    cellResult(bucketGPR, node);
}
13334
// Compiles GetMapBucketNext: advances along a map/set bucket chain, skipping
// buckets whose key slot is empty (deleted entries). When the chain ends
// (null next pointer), the VM's shared sentinel bucket is returned instead so
// callers always get a cell.
void SpeculativeJIT::compileGetMapBucketNext(Node* node)
{
    SpeculateCellOperand bucket(this, node->child1());
    GPRTemporary result(this);

    GPRReg bucketGPR = bucket.gpr();
    GPRReg resultGPR = result.gpr();

    // The next/key offsets agree between the key-only and key-value layouts,
    // so the key-value offsets below are valid for both.
    ASSERT(HashMapBucket<HashMapBucketDataKey>::offsetOfNext() == HashMapBucket<HashMapBucketDataKeyValue>::offsetOfNext());
    ASSERT(HashMapBucket<HashMapBucketDataKey>::offsetOfKey() == HashMapBucket<HashMapBucketDataKeyValue>::offsetOfKey());
    m_jit.loadPtr(MacroAssembler::Address(bucketGPR, HashMapBucket<HashMapBucketDataKeyValue>::offsetOfNext()), resultGPR);

    // Loop: stop at a null next pointer, or at the first bucket whose key
    // slot is non-empty.
    MacroAssembler::Label loop = m_jit.label();
    auto notBucket = m_jit.branchTestPtr(MacroAssembler::Zero, resultGPR);
#if USE(JSVALUE32_64)
    auto done = m_jit.branch32(MacroAssembler::NotEqual, MacroAssembler::Address(resultGPR, HashMapBucket<HashMapBucketDataKeyValue>::offsetOfKey() + TagOffset), TrustedImm32(JSValue::EmptyValueTag));
#else
    auto done = m_jit.branchTest64(MacroAssembler::NonZero, MacroAssembler::Address(resultGPR, HashMapBucket<HashMapBucketDataKeyValue>::offsetOfKey()));
#endif
    m_jit.loadPtr(MacroAssembler::Address(resultGPR, HashMapBucket<HashMapBucketDataKeyValue>::offsetOfNext()), resultGPR);
    m_jit.jump().linkTo(loop, &m_jit);

    notBucket.link(&m_jit);
    JSCell* sentinel = nullptr;
    if (node->bucketOwnerType() == BucketOwnerType::Map)
        sentinel = vm().sentinelMapBucket();
    else {
        ASSERT(node->bucketOwnerType() == BucketOwnerType::Set);
        sentinel = vm().sentinelSetBucket();
    }
    m_jit.move(TrustedImmPtr::weakPointer(m_jit.graph(), sentinel), resultGPR);
    done.link(&m_jit);

    cellResult(resultGPR, node);
}
13370
13371void SpeculativeJIT::compileLoadKeyFromMapBucket(Node* node)
13372{
13373 SpeculateCellOperand bucket(this, node->child1());
13374 JSValueRegsTemporary result(this);
13375
13376 GPRReg bucketGPR = bucket.gpr();
13377 JSValueRegs resultRegs = result.regs();
13378
13379 m_jit.loadValue(MacroAssembler::Address(bucketGPR, HashMapBucket<HashMapBucketDataKeyValue>::offsetOfKey()), resultRegs);
13380 jsValueResult(resultRegs, node);
13381}
13382
13383void SpeculativeJIT::compileLoadValueFromMapBucket(Node* node)
13384{
13385 SpeculateCellOperand bucket(this, node->child1());
13386 JSValueRegsTemporary result(this);
13387
13388 GPRReg bucketGPR = bucket.gpr();
13389 JSValueRegs resultRegs = result.regs();
13390
13391 m_jit.loadValue(MacroAssembler::Address(bucketGPR, HashMapBucket<HashMapBucketDataKeyValue>::offsetOfValue()), resultRegs);
13392 jsValueResult(resultRegs, node);
13393}
13394
// Compiles ExtractValueFromWeakMapGet: the raw lookup result uses the empty
// value to mean "not found"; convert that to undefined, and pass any other
// value through unchanged.
void SpeculativeJIT::compileExtractValueFromWeakMapGet(Node* node)
{
    JSValueOperand value(this, node->child1());
    JSValueRegsTemporary result(this, Reuse, value);

    JSValueRegs valueRegs = value.jsValueRegs();
    JSValueRegs resultRegs = result.regs();

#if USE(JSVALUE64)
    // On 64-bit the empty value is the all-zero encoding, so a single
    // non-zero test distinguishes it.
    m_jit.moveValueRegs(valueRegs, resultRegs);
    auto done = m_jit.branchTestPtr(CCallHelpers::NonZero, resultRegs.payloadGPR());
    m_jit.moveValue(jsUndefined(), resultRegs);
    done.link(&m_jit);
#else
    // On 32-bit, emptiness is encoded in the tag register.
    auto isEmpty = m_jit.branchIfEmpty(valueRegs.tagGPR());
    m_jit.moveValueRegs(valueRegs, resultRegs);
    auto done = m_jit.jump();

    isEmpty.link(&m_jit);
    m_jit.moveValue(jsUndefined(), resultRegs);

    done.link(&m_jit);
#endif

    jsValueResult(resultRegs, node, DataFormatJS);
}
13421
13422void SpeculativeJIT::compileThrow(Node* node)
13423{
13424 JSValueOperand value(this, node->child1());
13425 JSValueRegs valueRegs = value.jsValueRegs();
13426 flushRegisters();
13427 callOperation(operationThrowDFG, TrustedImmPtr::weakPointer(m_graph, m_graph.globalObjectFor(node->origin.semantic)), valueRegs);
13428 m_jit.exceptionCheck();
13429 m_jit.breakpoint();
13430 noResult(node);
13431}
13432
13433void SpeculativeJIT::compileThrowStaticError(Node* node)
13434{
13435 SpeculateCellOperand message(this, node->child1());
13436 GPRReg messageGPR = message.gpr();
13437 speculateString(node->child1(), messageGPR);
13438 flushRegisters();
13439 callOperation(operationThrowStaticError, TrustedImmPtr::weakPointer(m_graph, m_graph.globalObjectFor(node->origin.semantic)), messageGPR, node->errorType());
13440 m_jit.exceptionCheck();
13441 m_jit.breakpoint();
13442 noResult(node);
13443}
13444
13445void SpeculativeJIT::compileGetEnumerableLength(Node* node)
13446{
13447 SpeculateCellOperand enumerator(this, node->child1());
13448 GPRTemporary result(this, Reuse, enumerator);
13449 GPRReg enumeratorGPR = enumerator.gpr();
13450 GPRReg resultGPR = result.gpr();
13451
13452 m_jit.load32(MacroAssembler::Address(enumeratorGPR, JSPropertyNameEnumerator::indexedLengthOffset()), resultGPR);
13453 strictInt32Result(resultGPR, node);
13454}
13455
13456void SpeculativeJIT::compileHasEnumerableProperty(Node* node)
13457{
13458 JSValueOperand base(this, node->child1());
13459 SpeculateCellOperand property(this, node->child2());
13460
13461 JSValueRegs baseRegs = base.jsValueRegs();
13462 GPRReg propertyGPR = property.gpr();
13463
13464 flushRegisters();
13465 JSValueRegsFlushedCallResult result(this);
13466 JSValueRegs resultRegs = result.regs();
13467 callOperation(operationHasEnumerableProperty, resultRegs, TrustedImmPtr::weakPointer(m_graph, m_graph.globalObjectFor(node->origin.semantic)), baseRegs, propertyGPR);
13468 m_jit.exceptionCheck();
13469 blessedBooleanResult(resultRegs.payloadGPR(), node);
13470}
13471
13472void SpeculativeJIT::compileToIndexString(Node* node)
13473{
13474 SpeculateInt32Operand index(this, node->child1());
13475 GPRReg indexGPR = index.gpr();
13476
13477 flushRegisters();
13478 GPRFlushedCallResult result(this);
13479 GPRReg resultGPR = result.gpr();
13480 callOperation(operationToIndexString, resultGPR, TrustedImmPtr::weakPointer(m_graph, m_graph.globalObjectFor(node->origin.semantic)), indexGPR);
13481 m_jit.exceptionCheck();
13482 cellResult(resultGPR, node);
13483}
13484
// Compiles PutByIdFlush: a cached (IC) put-by-id that flushes all registers
// BEFORE emitting the IC, so cachedPutById is told DontSpill. A stub-info
// register is only materialized when the JIT uses data ICs.
void SpeculativeJIT::compilePutByIdFlush(Node* node)
{
    std::optional<GPRTemporary> stubInfo;
    SpeculateCellOperand base(this, node->child1());
    JSValueOperand value(this, node->child2());
    GPRTemporary scratch(this);

    GPRReg stubInfoGPR = InvalidGPRReg;
    if (JITCode::useDataIC(JITType::DFGJIT)) {
        stubInfo.emplace(this);
        stubInfoGPR = stubInfo->gpr();
    }
    GPRReg baseGPR = base.gpr();
    JSValueRegs valueRegs = value.jsValueRegs();
    GPRReg scratchGPR = scratch.gpr();
    // Flush up front; the DontSpill below tells the IC machinery everything
    // is already spilled.
    flushRegisters();

    cachedPutById(node->origin.semantic, baseGPR, valueRegs, stubInfoGPR, scratchGPR, node->cacheableIdentifier(), PutKind::NotDirect, node->ecmaMode(), MacroAssembler::Jump(), DontSpill);

    noResult(node);
}
13506
// Compiles PutById: a cached (IC) non-direct put-by-id. Unlike
// compilePutByIdFlush, registers are not flushed up front; cachedPutById
// handles spilling itself. A stub-info register is only materialized when the
// JIT uses data ICs.
void SpeculativeJIT::compilePutById(Node* node)
{
    std::optional<GPRTemporary> stubInfo;
    SpeculateCellOperand base(this, node->child1());
    JSValueOperand value(this, node->child2());
    GPRTemporary scratch(this);

    GPRReg stubInfoGPR = InvalidGPRReg;
    if (JITCode::useDataIC(JITType::DFGJIT)) {
        stubInfo.emplace(this);
        stubInfoGPR = stubInfo->gpr();
    }
    GPRReg baseGPR = base.gpr();
    JSValueRegs valueRegs = value.jsValueRegs();
    GPRReg scratchGPR = scratch.gpr();

    cachedPutById(node->origin.semantic, baseGPR, valueRegs, stubInfoGPR, scratchGPR, node->cacheableIdentifier(), PutKind::NotDirect, node->ecmaMode());

    noResult(node);
}
13527
// Compiles PutByIdDirect: identical setup to compilePutById but the put is
// PutKind::Direct (defines on the base itself rather than walking the
// prototype chain for setters).
void SpeculativeJIT::compilePutByIdDirect(Node* node)
{
    std::optional<GPRTemporary> stubInfo;
    SpeculateCellOperand base(this, node->child1());
    JSValueOperand value(this, node->child2());
    GPRTemporary scratch(this);

    GPRReg stubInfoGPR = InvalidGPRReg;
    if (JITCode::useDataIC(JITType::DFGJIT)) {
        stubInfo.emplace(this);
        stubInfoGPR = stubInfo->gpr();
    }
    GPRReg baseGPR = base.gpr();
    JSValueRegs valueRegs = value.jsValueRegs();
    GPRReg scratchGPR = scratch.gpr();

    cachedPutById(node->origin.semantic, baseGPR, valueRegs, stubInfoGPR, scratchGPR, node->cacheableIdentifier(), PutKind::Direct, node->ecmaMode());

    noResult(node);
}
13548
// PutByIdWithThis has no inline-cache fast path here: it always flushes and
// calls out to the runtime, picking the strict or sloppy operation based on
// the node's ECMA mode.
void SpeculativeJIT::compilePutByIdWithThis(Node* node)
{
    JSValueOperand base(this, node->child1());
    JSValueRegs baseRegs = base.jsValueRegs();
    JSValueOperand thisValue(this, node->child2());
    JSValueRegs thisRegs = thisValue.jsValueRegs();
    JSValueOperand value(this, node->child3());
    JSValueRegs valueRegs = value.jsValueRegs();

    flushRegisters();
    callOperation(node->ecmaMode().isStrict() ? operationPutByIdWithThisStrict : operationPutByIdWithThis,
        TrustedImmPtr::weakPointer(m_graph, m_graph.globalObjectFor(node->origin.semantic)), baseRegs, thisRegs, valueRegs, node->cacheableIdentifier().rawBits());
    // The runtime put can throw (e.g. strict-mode failures), so check for a
    // pending exception after the call.
    m_jit.exceptionCheck();

    noResult(node);
}
13565
// Loads a JSValue from a known property offset relative to a property-storage
// pointer (child1 is the storage, not the base object).
void SpeculativeJIT::compileGetByOffset(Node* node)
{
    StorageOperand storage(this, node->child1());
    // Reuse the storage register for the result once the load consumes it.
    JSValueRegsTemporary result(this, Reuse, storage);

    GPRReg storageGPR = storage.gpr();
    JSValueRegs resultRegs = result.regs();

    StorageAccessData& storageAccessData = node->storageAccessData();

    m_jit.loadValue(JITCompiler::Address(storageGPR, offsetRelativeToBase(storageAccessData.offset)), resultRegs);

    jsValueResult(resultRegs, node);
}
13580
// Stores a JSValue to a known property offset. child1 is the property storage,
// child3 is the value; child2 (presumably the base object — confirm against the
// node's definition) is only speculated on here, not otherwise read.
void SpeculativeJIT::compilePutByOffset(Node* node)
{
    StorageOperand storage(this, node->child1());
    JSValueOperand value(this, node->child3());

    GPRReg storageGPR = storage.gpr();
    JSValueRegs valueRegs = value.jsValueRegs();

    // Perform child2's speculation checks before emitting the store.
    speculate(node, node->child2());

    StorageAccessData& storageAccessData = node->storageAccessData();

    m_jit.storeValue(valueRegs, JITCompiler::Address(storageGPR, offsetRelativeToBase(storageAccessData.offset)));

    noResult(node);
}
13597
// MatchStructure: map the base cell's structure ID to a boolean via a binary
// switch over the node's structure variants. A structure not in the variant
// list triggers a BadCache OSR exit.
void SpeculativeJIT::compileMatchStructure(Node* node)
{
    SpeculateCellOperand base(this, node->child1());
    GPRTemporary temp(this);
    GPRReg baseGPR = base.gpr();
    GPRReg tempGPR = temp.gpr();

    // Load the 32-bit structure ID to switch on.
    m_jit.load32(JITCompiler::Address(baseGPR, JSCell::structureIDOffset()), tempGPR);

    auto& variants = node->matchStructureData().variants;
    Vector<int64_t> cases;
    for (MatchStructureVariant& variant : variants)
        cases.append(bitwise_cast<int32_t>(variant.structure->id()));

    BinarySwitch binarySwitch(tempGPR, cases, BinarySwitch::Int32);
    JITCompiler::JumpList done;
    // Each advance() positions us at one case; emit that case's result (the
    // variant's boolean, boxed) and jump to the common exit.
    while (binarySwitch.advance(m_jit)) {
        m_jit.boxBooleanPayload(variants[binarySwitch.caseIndex()].result, tempGPR);
        done.append(m_jit.jump());
    }
    // No variant matched: exit with BadCache so we re-profile.
    speculationCheck(BadCache, JSValueRegs(), node, binarySwitch.fallThrough());

    done.link(&m_jit);

    blessedBooleanResult(tempGPR, node);
}
13624
// Fast path: if the base is a cell whose structure ID matches the enumerator's
// cached structure, the enumerated property is known to exist, so answer true.
// Any mismatch (non-cell base, or structure changed) falls back to
// operationHasEnumerableProperty via a slow-path generator.
void SpeculativeJIT::compileHasEnumerableStructureProperty(Node* node)
{
    JSValueOperand base(this, node->child1());
    SpeculateCellOperand property(this, node->child2());
    SpeculateCellOperand enumerator(this, node->child3());
    JSValueRegsTemporary result(this);

    JSValueRegs baseRegs = base.jsValueRegs();
    GPRReg propertyGPR = property.gpr();
    JSValueRegs resultRegs = result.regs();

    CCallHelpers::JumpList wrongStructure;

    // Base may be any JSValue here; non-cells can't match a cached structure.
    wrongStructure.append(m_jit.branchIfNotCell(baseRegs));

    // Compare the base's structure ID against the enumerator's cached one.
    // The result payload GPR doubles as a scratch for the loaded ID.
    m_jit.load32(MacroAssembler::Address(baseRegs.payloadGPR(), JSCell::structureIDOffset()), resultRegs.payloadGPR());
    wrongStructure.append(m_jit.branch32(MacroAssembler::NotEqual,
        resultRegs.payloadGPR(),
        MacroAssembler::Address(enumerator.gpr(), JSPropertyNameEnumerator::cachedStructureIDOffset())));

    moveTrueTo(resultRegs.payloadGPR());

    addSlowPathGenerator(slowPathCall(wrongStructure, this, operationHasEnumerableProperty, resultRegs, TrustedImmPtr::weakPointer(m_graph, m_graph.globalObjectFor(node->origin.semantic)), baseRegs, propertyGPR));
    blessedBooleanResult(resultRegs.payloadGPR(), node);
}
13650
// Shared implementation for HasOwnStructureProperty / InStructureProperty.
// Same structure-ID-vs-enumerator fast path as
// compileHasEnumerableStructureProperty, except the base is already speculated
// to be a cell, so no cell check is needed; the slow-path operation is
// supplied by the caller.
template <typename Function>
void SpeculativeJIT::compileHasOwnStructurePropertyImpl(Node* node, Function slowPath)
{
    SpeculateCellOperand base(this, node->child1());
    SpeculateCellOperand property(this, node->child2());
    SpeculateCellOperand enumerator(this, node->child3());
    JSValueRegsTemporary result(this);

    GPRReg baseGPR = base.gpr();
    GPRReg propertyGPR = property.gpr();
    JSValueRegs resultRegs = result.regs();

    CCallHelpers::JumpList wrongStructure;

    // Fast path succeeds only when the base's structure ID equals the
    // enumerator's cached structure ID; the result GPR is used as scratch.
    m_jit.load32(MacroAssembler::Address(baseGPR, JSCell::structureIDOffset()), resultRegs.payloadGPR());
    wrongStructure.append(m_jit.branch32(MacroAssembler::NotEqual,
        resultRegs.payloadGPR(),
        MacroAssembler::Address(enumerator.gpr(), JSPropertyNameEnumerator::cachedStructureIDOffset())));

    moveTrueTo(resultRegs.payloadGPR());

    addSlowPathGenerator(slowPathCall(wrongStructure, this, slowPath, resultRegs, TrustedImmPtr::weakPointer(m_graph, m_graph.globalObjectFor(node->origin.semantic)), baseGPR, propertyGPR));
    blessedBooleanResult(resultRegs.payloadGPR(), node);
}
13675
// HasOwnStructureProperty: shared fast path with operationHasOwnStructureProperty as the slow path.
void SpeculativeJIT::compileHasOwnStructureProperty(Node* node)
{
    compileHasOwnStructurePropertyImpl(node, operationHasOwnStructureProperty);
}
13680
// InStructureProperty: same fast path as HasOwnStructureProperty, but falls back to operationInStructureProperty.
void SpeculativeJIT::compileInStructureProperty(Node* node)
{
    compileHasOwnStructurePropertyImpl(node, operationInStructureProperty);
}
13685
// GetPropertyEnumerator is always a runtime call; the only specialization is
// choosing the cell-taking operation when child1 is known to be a cell, which
// avoids boxing the base as a full JSValue.
void SpeculativeJIT::compileGetPropertyEnumerator(Node* node)
{
    if (node->child1().useKind() == CellUse) {
        SpeculateCellOperand base(this, node->child1());
        GPRReg baseGPR = base.gpr();

        flushRegisters();
        GPRFlushedCallResult result(this);
        GPRReg resultGPR = result.gpr();
        callOperation(operationGetPropertyEnumeratorCell, resultGPR, TrustedImmPtr::weakPointer(m_graph, m_graph.globalObjectFor(node->origin.semantic)), baseGPR);
        m_jit.exceptionCheck();
        cellResult(resultGPR, node);
        return;
    }

    // Generic path: base is an arbitrary JSValue.
    JSValueOperand base(this, node->child1());
    JSValueRegs baseRegs = base.jsValueRegs();

    flushRegisters();
    GPRFlushedCallResult result(this);
    GPRReg resultGPR = result.gpr();
    callOperation(operationGetPropertyEnumerator, resultGPR, TrustedImmPtr::weakPointer(m_graph, m_graph.globalObjectFor(node->origin.semantic)), baseRegs);
    m_jit.exceptionCheck();
    cellResult(resultGPR, node);
}
13711
// Fetch a property name from a JSPropertyNameEnumerator at a given index.
// Out-of-range indices produce jsNull(); in-range indices load the name cell
// from the enumerator's cached property-names vector. The end bound depends on
// whether we're iterating the structure portion or the generic portion.
void SpeculativeJIT::compileGetEnumeratorPname(Node* node)
{
    ASSERT(node->op() == GetEnumeratorStructurePname || node->op() == GetEnumeratorGenericPname);
    SpeculateCellOperand enumerator(this, node->child1());
    SpeculateStrictInt32Operand index(this, node->child2());
    GPRTemporary scratch(this);
    JSValueRegsTemporary result(this);

    GPRReg enumeratorGPR = enumerator.gpr();
    GPRReg indexGPR = index.gpr();
    GPRReg scratchGPR = scratch.gpr();
    JSValueRegs resultRegs = result.regs();

    // Unsigned "Below" comparison against the op-appropriate end index.
    MacroAssembler::Jump inBounds = m_jit.branch32(MacroAssembler::Below, indexGPR,
        MacroAssembler::Address(enumeratorGPR, (node->op() == GetEnumeratorStructurePname)
            ? JSPropertyNameEnumerator::endStructurePropertyIndexOffset()
            : JSPropertyNameEnumerator::endGenericPropertyIndexOffset()));

    // Past the end: the result is null.
    m_jit.moveValue(jsNull(), resultRegs);

    MacroAssembler::Jump done = m_jit.jump();
    inBounds.link(&m_jit);

    // In bounds: result = cachedPropertyNamesVector[index] (a pointer-sized load).
    m_jit.loadPtr(MacroAssembler::Address(enumeratorGPR, JSPropertyNameEnumerator::cachedPropertyNamesVectorOffset()), scratchGPR);
    m_jit.loadPtr(MacroAssembler::BaseIndex(scratchGPR, indexGPR, MacroAssembler::ScalePtr), resultRegs.payloadGPR());
#if USE(JSVALUE32_64)
    // On 32-bit, the tag must be set explicitly to mark the payload as a cell.
    m_jit.move(MacroAssembler::TrustedImm32(JSValue::CellTag), resultRegs.tagGPR());
#endif

    done.link(&m_jit);
    jsValueResult(resultRegs, node);
}
13744
13745void SpeculativeJIT::compileGetExecutable(Node* node)
13746{
13747 SpeculateCellOperand function(this, node->child1());
13748 GPRTemporary result(this, Reuse, function);
13749 speculateFunction(node->child1(), function.gpr());
13750 getExecutable(m_jit, function.gpr(), result.gpr());
13751 cellResult(result.gpr(), node);
13752}
13753
13754void SpeculativeJIT::compileGetGetter(Node* node)
13755{
13756 SpeculateCellOperand op1(this, node->child1());
13757 GPRTemporary result(this, Reuse, op1);
13758
13759 GPRReg op1GPR = op1.gpr();
13760 GPRReg resultGPR = result.gpr();
13761
13762 m_jit.loadPtr(JITCompiler::Address(op1GPR, GetterSetter::offsetOfGetter()), resultGPR);
13763
13764 cellResult(resultGPR, node);
13765}
13766
13767void SpeculativeJIT::compileGetSetter(Node* node)
13768{
13769 SpeculateCellOperand op1(this, node->child1());
13770 GPRTemporary result(this, Reuse, op1);
13771
13772 GPRReg op1GPR = op1.gpr();
13773 GPRReg resultGPR = result.gpr();
13774
13775 m_jit.loadPtr(JITCompiler::Address(op1GPR, GetterSetter::offsetOfSetter()), resultGPR);
13776
13777 cellResult(resultGPR, node);
13778}
13779
13780void SpeculativeJIT::compileGetCallee(Node* node)
13781{
13782 GPRTemporary result(this);
13783 m_jit.loadPtr(JITCompiler::payloadFor(CallFrameSlot::callee), result.gpr());
13784 cellResult(result.gpr(), node);
13785}
13786
13787void SpeculativeJIT::compileSetCallee(Node* node)
13788{
13789 SpeculateCellOperand callee(this, node->child1());
13790 m_jit.storeCell(callee.gpr(), JITCompiler::payloadFor(CallFrameSlot::callee));
13791 noResult(node);
13792}
13793
// Read the argument count (including |this|) as an int32. For inlined frames
// the count lives in the inline call frame's dedicated virtual register;
// otherwise it lives in the machine frame's argument-count slot.
void SpeculativeJIT::compileGetArgumentCountIncludingThis(Node* node)
{
    GPRTemporary result(this);
    VirtualRegister argumentCountRegister;
    if (InlineCallFrame* inlineCallFrame = node->argumentsInlineCallFrame())
        argumentCountRegister = inlineCallFrame->argumentCountRegister;
    else
        argumentCountRegister = CallFrameSlot::argumentCountIncludingThis;
    m_jit.load32(JITCompiler::payloadFor(argumentCountRegister), result.gpr());
    strictInt32Result(result.gpr(), node);
}
13805
13806void SpeculativeJIT::compileSetArgumentCountIncludingThis(Node* node)
13807{
13808 m_jit.store32(TrustedImm32(node->argumentCountIncludingThis()), JITCompiler::payloadFor(CallFrameSlot::argumentCountIncludingThis));
13809 noResult(node);
13810}
13811
// StrCat: concatenate two or three values into a string via a runtime call.
// Operands use ManualOperandSpeculation — any required speculation is handled
// elsewhere, not by the operand constructors here.
void SpeculativeJIT::compileStrCat(Node* node)
{
    JSValueOperand op1(this, node->child1(), ManualOperandSpeculation);
    JSValueOperand op2(this, node->child2(), ManualOperandSpeculation);
    JSValueOperand op3(this, node->child3(), ManualOperandSpeculation);

    JSValueRegs op1Regs = op1.jsValueRegs();
    JSValueRegs op2Regs = op2.jsValueRegs();
    JSValueRegs op3Regs;

    // The third operand is optional; only fetch its registers if it exists.
    if (node->child3())
        op3Regs = op3.jsValueRegs();

    flushRegisters();

    GPRFlushedCallResult result(this);
    // Pick the 2- or 3-argument runtime concatenation depending on arity.
    if (node->child3())
        callOperation(operationStrCat3, result.gpr(), TrustedImmPtr::weakPointer(m_graph, m_graph.globalObjectFor(node->origin.semantic)), op1Regs, op2Regs, op3Regs);
    else
        callOperation(operationStrCat2, result.gpr(), TrustedImmPtr::weakPointer(m_graph, m_graph.globalObjectFor(node->origin.semantic)), op1Regs, op2Regs);
    m_jit.exceptionCheck();

    cellResult(result.gpr(), node);
}
13836
// NewArrayBuffer: materialize an array from a constant JSImmutableButterfly.
// Fast path inline-allocates a JSArray that points directly at the immutable
// butterfly (copy-on-write); this is only valid when the global object isn't
// "having a bad time" and the indexing mode has no ArrayStorage.
void SpeculativeJIT::compileNewArrayBuffer(Node* node)
{
    JSGlobalObject* globalObject = m_jit.graph().globalObjectFor(node->origin.semantic);
    auto* array = node->castOperand<JSImmutableButterfly*>();

    IndexingType indexingMode = node->indexingMode();
    RegisteredStructure structure = m_jit.graph().registerStructure(globalObject->arrayStructureForIndexingTypeDuringAllocation(indexingMode));

    if (!globalObject->isHavingABadTime() && !hasAnyArrayStorage(indexingMode)) {
        GPRTemporary result(this);
        GPRTemporary scratch1(this);
        GPRTemporary scratch2(this);

        GPRReg resultGPR = result.gpr();
        GPRReg scratch1GPR = scratch1.gpr();
        GPRReg scratch2GPR = scratch2.gpr();

        MacroAssembler::JumpList slowCases;

        // Allocate the JSArray cell with the shared immutable butterfly as its
        // backing store; allocation failure falls through to the slow path.
        emitAllocateJSObject<JSArray>(resultGPR, TrustedImmPtr(structure), TrustedImmPtr(array->toButterfly()), scratch1GPR, scratch2GPR, slowCases);

        addSlowPathGenerator(slowPathCall(slowCases, this, operationNewArrayBuffer, result.gpr(), &vm(), structure, array));

        DFG_ASSERT(m_jit.graph(), node, indexingMode & IsArray, indexingMode);
        cellResult(resultGPR, node);
        return;
    }

    // Slow shape (bad time, or ArrayStorage): always call the runtime.
    flushRegisters();
    GPRFlushedCallResult result(this);

    callOperation(operationNewArrayBuffer, result.gpr(), &vm(), structure, TrustedImmPtr(node->cellOperand()));
    m_jit.exceptionCheck();

    cellResult(result.gpr(), node);
}
13873
// NewArrayWithSize: allocate an array of a dynamic length. Fast path does an
// inline allocation when the global object is healthy and the indexing type
// has no ArrayStorage. The runtime path picks an ArrayWithArrayStorage
// structure for lengths >= MIN_ARRAY_STORAGE_CONSTRUCTION_LENGTH.
void SpeculativeJIT::compileNewArrayWithSize(Node* node)
{
    JSGlobalObject* globalObject = m_jit.graph().globalObjectFor(node->origin.semantic);
    if (!globalObject->isHavingABadTime() && !hasAnyArrayStorage(node->indexingType())) {
        SpeculateStrictInt32Operand size(this, node->child1());
        GPRTemporary result(this);

        GPRReg sizeGPR = size.gpr();
        GPRReg resultGPR = result.gpr();

        compileAllocateNewArrayWithSize(globalObject, resultGPR, sizeGPR, node->indexingType());
        cellResult(resultGPR, node);
        return;
    }

    SpeculateStrictInt32Operand size(this, node->child1());
    GPRReg sizeGPR = size.gpr();
    flushRegisters();
    GPRFlushedCallResult result(this);
    GPRReg resultGPR = result.gpr();
    // Registers are already flushed, so any GPR distinct from sizeGPR works as
    // a scratch to hold the chosen structure.
    GPRReg structureGPR = AssemblyHelpers::selectScratchGPR(sizeGPR);
    // Large arrays (unsigned >= threshold, which also catches negative sizes)
    // get ArrayStorage; small ones keep the node's indexing type.
    MacroAssembler::Jump bigLength = m_jit.branch32(MacroAssembler::AboveOrEqual, sizeGPR, TrustedImm32(MIN_ARRAY_STORAGE_CONSTRUCTION_LENGTH));
    m_jit.move(TrustedImmPtr(m_jit.graph().registerStructure(globalObject->arrayStructureForIndexingTypeDuringAllocation(node->indexingType()))), structureGPR);
    MacroAssembler::Jump done = m_jit.jump();
    bigLength.link(&m_jit);
    m_jit.move(TrustedImmPtr(m_jit.graph().registerStructure(globalObject->arrayStructureForIndexingTypeDuringAllocation(ArrayWithArrayStorage))), structureGPR);
    done.link(&m_jit);
    callOperation(operationNewArrayWithSize, resultGPR, TrustedImmPtr::weakPointer(m_graph, m_graph.globalObjectFor(node->origin.semantic)), structureGPR, sizeGPR, nullptr);
    m_jit.exceptionCheck();
    cellResult(resultGPR, node);
}
13905
// NewTypedArray: Int32Use lengths take the specialized size-based path;
// UntypedUse arguments go straight to the one-argument runtime constructor for
// the node's typed-array type.
void SpeculativeJIT::compileNewTypedArray(Node* node)
{
    switch (node->child1().useKind()) {
    case Int32Use:
        compileNewTypedArrayWithSize(node);
        break;
    case UntypedUse: {
        JSValueOperand argument(this, node->child1());
        JSValueRegs argumentRegs = argument.jsValueRegs();

        flushRegisters();

        GPRFlushedCallResult result(this);
        GPRReg resultGPR = result.gpr();

        JSGlobalObject* globalObject = m_jit.graph().globalObjectFor(node->origin.semantic);
        callOperation(
            operationNewTypedArrayWithOneArgumentForType(node->typedArrayType()),
            resultGPR, TrustedImmPtr::weakPointer(m_graph, m_graph.globalObjectFor(node->origin.semantic)), m_jit.graph().registerStructure(globalObject->typedArrayStructureConcurrently(node->typedArrayType())), argumentRegs);
        m_jit.exceptionCheck();

        cellResult(resultGPR, node);
        break;
    }
    default:
        RELEASE_ASSERT_NOT_REACHED();
        break;
    }
}
13935
// ToThis (untyped): fast path passes the value through unchanged when it is a
// cell whose type-info flags do not include OverridesToThis; anything else
// (non-cells, or cells that override ToThis) takes the slow path, which is
// strictness-dependent.
void SpeculativeJIT::compileToThis(Node* node)
{
    ASSERT(node->child1().useKind() == UntypedUse);
    JSValueOperand thisValue(this, node->child1());
    JSValueRegsTemporary temp(this);

    JSValueRegs thisValueRegs = thisValue.jsValueRegs();
    JSValueRegs tempRegs = temp.regs();

    MacroAssembler::JumpList slowCases;
    slowCases.append(m_jit.branchIfNotCell(thisValueRegs));
    slowCases.append(
        m_jit.branchTest8(
            MacroAssembler::NonZero,
            MacroAssembler::Address(thisValueRegs.payloadGPR(), JSCell::typeInfoFlagsOffset()),
            MacroAssembler::TrustedImm32(OverridesToThis)));
    // Fast case: the value is its own |this|.
    m_jit.moveValueRegs(thisValueRegs, tempRegs);

    J_JITOperation_GJ function;
    if (node->ecmaMode().isStrict())
        function = operationToThisStrict;
    else
        function = operationToThis;
    addSlowPathGenerator(slowPathCall(slowCases, this, function, tempRegs, TrustedImmPtr::weakPointer(m_graph, m_graph.globalObjectFor(node->origin.semantic)), thisValueRegs));

    jsValueResult(tempRegs, node);
}
13963
// Object.keys / Object.getOwnPropertyNames. For ObjectUse with the
// having-a-bad-time watchpoint intact, we try a structure-cached fast path:
// reuse the structure's cached property-name JSImmutableButterfly as the
// backing store of a freshly allocated copy-on-write array. All other shapes
// call the appropriate runtime operation.
void SpeculativeJIT::compileObjectKeysOrObjectGetOwnPropertyNames(Node* node)
{
    switch (node->child1().useKind()) {
    case ObjectUse: {
        if (m_graph.isWatchingHavingABadTimeWatchpoint(node)) {
            SpeculateCellOperand object(this, node->child1());
            GPRTemporary structure(this);
            GPRTemporary scratch(this);
            GPRTemporary scratch2(this);
            GPRTemporary scratch3(this);
            GPRTemporary result(this);

            GPRReg objectGPR = object.gpr();
            GPRReg structureGPR = structure.gpr();
            GPRReg scratchGPR = scratch.gpr();
            GPRReg scratch2GPR = scratch2.gpr();
            GPRReg scratch3GPR = scratch3.gpr();
            GPRReg resultGPR = result.gpr();

            speculateObject(node->child1(), objectGPR);

            CCallHelpers::JumpList slowCases;
            m_jit.emitLoadStructure(vm(), objectGPR, structureGPR, scratchGPR);
            // previousOrRareData holds either a previous Structure or a
            // StructureRareData; only the latter caches property names.
            m_jit.loadPtr(CCallHelpers::Address(structureGPR, Structure::previousOrRareDataOffset()), scratchGPR);

            slowCases.append(m_jit.branchTestPtr(CCallHelpers::Zero, scratchGPR));
            // If the cell's structure is the Structure-structure, this is a
            // previous Structure rather than rare data — take the slow path.
            slowCases.append(m_jit.branch32(CCallHelpers::Equal, CCallHelpers::Address(scratchGPR, JSCell::structureIDOffset()), TrustedImm32(bitwise_cast<int32_t>(vm().structureStructure->structureID()))));

            m_jit.loadPtr(CCallHelpers::Address(scratchGPR, StructureRareData::offsetOfCachedPropertyNames(node->op() == ObjectKeys ? CachedPropertyNamesKind::Keys : CachedPropertyNamesKind::GetOwnPropertyNames)), scratchGPR);

            // Cache slot is null (0) or the sentinel (1) when not populated;
            // BelowOrEqual against the sentinel catches both.
            ASSERT(bitwise_cast<uintptr_t>(StructureRareData::cachedPropertyNamesSentinel()) == 1);
            slowCases.append(m_jit.branchPtr(CCallHelpers::BelowOrEqual, scratchGPR, TrustedImmPtr(bitwise_cast<void*>(StructureRareData::cachedPropertyNamesSentinel()))));

            MacroAssembler::JumpList slowButArrayBufferCases;

            JSGlobalObject* globalObject = m_jit.graph().globalObjectFor(node->origin.semantic);
            RegisteredStructure arrayStructure = m_jit.graph().registerStructure(globalObject->arrayStructureForIndexingTypeDuringAllocation(CopyOnWriteArrayWithContiguous));

            // Keep the immutable butterfly pointer in scratch3 for the
            // allocation slow path; scratchGPR is advanced to the data start,
            // which serves as the new array's butterfly.
            m_jit.move(scratchGPR, scratch3GPR);
            m_jit.addPtr(TrustedImm32(JSImmutableButterfly::offsetOfData()), scratchGPR);

            emitAllocateJSObject<JSArray>(resultGPR, TrustedImmPtr(arrayStructure), scratchGPR, structureGPR, scratch2GPR, slowButArrayBufferCases);

            // Allocation failure with a valid cached butterfly: build the
            // array in the runtime from that butterfly.
            addSlowPathGenerator(slowPathCall(slowButArrayBufferCases, this, operationNewArrayBuffer, resultGPR, &vm(), arrayStructure, scratch3GPR));

            // No usable cache: fall back to the full runtime implementation.
            addSlowPathGenerator(slowPathCall(slowCases, this, node->op() == ObjectKeys ? operationObjectKeysObject : operationObjectGetOwnPropertyNamesObject, resultGPR, TrustedImmPtr::weakPointer(m_graph, m_graph.globalObjectFor(node->origin.semantic)), objectGPR));

            cellResult(resultGPR, node);
            break;
        }

        // ObjectUse without the watchpoint: plain runtime call on the object.
        SpeculateCellOperand object(this, node->child1());

        GPRReg objectGPR = object.gpr();

        speculateObject(node->child1(), objectGPR);

        flushRegisters();
        GPRFlushedCallResult result(this);
        GPRReg resultGPR = result.gpr();
        callOperation(node->op() == ObjectKeys ? operationObjectKeysObject : operationObjectGetOwnPropertyNamesObject, resultGPR, TrustedImmPtr::weakPointer(m_graph, m_graph.globalObjectFor(node->origin.semantic)), objectGPR);
        m_jit.exceptionCheck();

        cellResult(resultGPR, node);
        break;
    }

    case UntypedUse: {
        // Generic path: argument may be any JSValue.
        JSValueOperand object(this, node->child1());

        JSValueRegs objectRegs = object.jsValueRegs();

        flushRegisters();
        GPRFlushedCallResult result(this);
        GPRReg resultGPR = result.gpr();
        callOperation(node->op() == ObjectKeys ? operationObjectKeys : operationObjectGetOwnPropertyNames, resultGPR, TrustedImmPtr::weakPointer(m_graph, m_graph.globalObjectFor(node->origin.semantic)), objectRegs);
        m_jit.exceptionCheck();

        cellResult(resultGPR, node);
        break;
    }

    default:
        RELEASE_ASSERT_NOT_REACHED();
        break;
    }
}
14051
// Object.create: always a runtime call; the ObjectUse case speculates the
// prototype is an object and passes the bare cell, while UntypedUse passes the
// full JSValue.
void SpeculativeJIT::compileObjectCreate(Node* node)
{
    switch (node->child1().useKind()) {
    case ObjectUse: {
        SpeculateCellOperand prototype(this, node->child1());

        GPRReg prototypeGPR = prototype.gpr();

        speculateObject(node->child1(), prototypeGPR);

        flushRegisters();
        GPRFlushedCallResult result(this);
        GPRReg resultGPR = result.gpr();
        callOperation(operationObjectCreateObject, resultGPR, TrustedImmPtr::weakPointer(m_graph, m_graph.globalObjectFor(node->origin.semantic)), prototypeGPR);
        m_jit.exceptionCheck();

        cellResult(resultGPR, node);
        break;
    }

    case UntypedUse: {
        JSValueOperand prototype(this, node->child1());

        JSValueRegs prototypeRegs = prototype.jsValueRegs();

        flushRegisters();
        GPRFlushedCallResult result(this);
        GPRReg resultGPR = result.gpr();
        callOperation(operationObjectCreate, resultGPR, TrustedImmPtr::weakPointer(m_graph, m_graph.globalObjectFor(node->origin.semantic)), prototypeRegs);
        m_jit.exceptionCheck();

        cellResult(resultGPR, node);
        break;
    }

    default:
        RELEASE_ASSERT_NOT_REACHED();
        break;
    }
}
14092
// CreateThis: inline-allocate the |this| object for a [[Construct]] using the
// callee's object allocation profile; falls back to operationCreateThis when
// the callee isn't a JSFunction, has no rare data, or allocation fails.
void SpeculativeJIT::compileCreateThis(Node* node)
{
    // Note that there is not so much profit to speculate here. The only things we
    // speculate on are (1) that it's a cell, since that eliminates cell checks
    // later if the proto is reused, and (2) if we have a FinalObject prediction
    // then we speculate because we want to get recompiled if it isn't (since
    // otherwise we'd start taking slow path a lot).

    SpeculateCellOperand callee(this, node->child1());
    GPRTemporary result(this);
    GPRTemporary allocator(this);
    GPRTemporary structure(this);
    GPRTemporary scratch(this);

    GPRReg calleeGPR = callee.gpr();
    GPRReg resultGPR = result.gpr();
    GPRReg allocatorGPR = allocator.gpr();
    GPRReg structureGPR = structure.gpr();
    GPRReg scratchGPR = scratch.gpr();
    // Rare data is only used to access the allocator & structure
    // We can avoid using an additional GPR this way
    GPRReg rareDataGPR = structureGPR;
    GPRReg inlineCapacityGPR = rareDataGPR;

    MacroAssembler::JumpList slowPath;

    slowPath.append(m_jit.branchIfNotFunction(calleeGPR));
    m_jit.loadPtr(JITCompiler::Address(calleeGPR, JSFunction::offsetOfExecutableOrRareData()), rareDataGPR);
    // executableOrRareData is tagged: if the rareDataTag bit is clear, the
    // field holds an executable (no allocation profile yet) — take slow path.
    slowPath.append(m_jit.branchTestPtr(MacroAssembler::Zero, rareDataGPR, CCallHelpers::TrustedImm32(JSFunction::rareDataTag)));
    // Offsets subtract rareDataTag to compensate for the tagged pointer.
    m_jit.loadPtr(JITCompiler::Address(rareDataGPR, FunctionRareData::offsetOfObjectAllocationProfile() + ObjectAllocationProfileWithPrototype::offsetOfAllocator() - JSFunction::rareDataTag), allocatorGPR);
    m_jit.loadPtr(JITCompiler::Address(rareDataGPR, FunctionRareData::offsetOfObjectAllocationProfile() + ObjectAllocationProfileWithPrototype::offsetOfStructure() - JSFunction::rareDataTag), structureGPR);

    auto butterfly = TrustedImmPtr(nullptr);
    emitAllocateJSObject(resultGPR, JITAllocator::variable(), allocatorGPR, structureGPR, butterfly, scratchGPR, slowPath);

    // Clear the new object's inline storage (inlineCapacityGPR aliases
    // structureGPR, which is dead after this load).
    m_jit.load8(JITCompiler::Address(structureGPR, Structure::inlineCapacityOffset()), inlineCapacityGPR);
    m_jit.emitInitializeInlineStorage(resultGPR, inlineCapacityGPR);
    // Fence before publishing the initialized object to the concurrent GC.
    m_jit.mutatorFence(vm());

    addSlowPathGenerator(slowPathCall(slowPath, this, operationCreateThis, resultGPR, TrustedImmPtr::weakPointer(m_graph, m_graph.globalObjectFor(node->origin.semantic)), calleeGPR, node->inlineCapacity()));

    cellResult(resultGPR, node);
}
14136
// CreatePromise: allocate a JSPromise (or JSInternalPromise) for a construct.
// If the callee is exactly the global's promise constructor we can use the
// known promise structure directly; otherwise we validate the callee's cached
// InternalFunctionAllocationProfile structure (right classInfo and right
// global object) before inline-allocating, else call the runtime.
void SpeculativeJIT::compileCreatePromise(Node* node)
{
    JSGlobalObject* globalObject = m_jit.globalObjectFor(node->origin.semantic);

    SpeculateCellOperand callee(this, node->child1());
    GPRTemporary result(this);
    GPRTemporary structure(this);
    GPRTemporary scratch1(this);
    GPRTemporary scratch2(this);

    GPRReg calleeGPR = callee.gpr();
    GPRReg resultGPR = result.gpr();
    GPRReg structureGPR = structure.gpr();
    GPRReg scratch1GPR = scratch1.gpr();
    GPRReg scratch2GPR = scratch2.gpr();
    // Rare data is only used to access the allocator & structure
    // We can avoid using an additional GPR this way
    GPRReg rareDataGPR = structureGPR;

    // Preload the well-known structure; if the callee is the builtin promise
    // constructor itself, skip all rare-data validation.
    m_jit.move(TrustedImmPtr(m_jit.graph().registerStructure(node->isInternalPromise() ? globalObject->internalPromiseStructure() : globalObject->promiseStructure())), structureGPR);
    auto fastPromisePath = m_jit.branchPtr(CCallHelpers::Equal, calleeGPR, TrustedImmPtr::weakPointer(m_jit.graph(), node->isInternalPromise() ? globalObject->internalPromiseConstructor() : globalObject->promiseConstructor()));

    MacroAssembler::JumpList slowCases;

    slowCases.append(m_jit.branchIfNotFunction(calleeGPR));
    m_jit.loadPtr(JITCompiler::Address(calleeGPR, JSFunction::offsetOfExecutableOrRareData()), rareDataGPR);
    // Tagged field: tag bit clear means executable, not rare data.
    slowCases.append(m_jit.branchTestPtr(MacroAssembler::Zero, rareDataGPR, CCallHelpers::TrustedImm32(JSFunction::rareDataTag)));
    m_jit.loadPtr(JITCompiler::Address(rareDataGPR, FunctionRareData::offsetOfInternalFunctionAllocationProfile() + InternalFunctionAllocationProfile::offsetOfStructure() - JSFunction::rareDataTag), structureGPR);
    slowCases.append(m_jit.branchTestPtr(CCallHelpers::Zero, structureGPR));
    // The cached structure must describe the right promise class...
    m_jit.move(TrustedImmPtr(node->isInternalPromise() ? JSInternalPromise::info() : JSPromise::info()), scratch1GPR);
    slowCases.append(m_jit.branchPtr(CCallHelpers::NotEqual, scratch1GPR, CCallHelpers::Address(structureGPR, Structure::classInfoOffset())));
    // ...and belong to this global object.
    m_jit.move(TrustedImmPtr::weakPointer(m_jit.graph(), globalObject), scratch1GPR);
    slowCases.append(m_jit.branchPtr(CCallHelpers::NotEqual, scratch1GPR, CCallHelpers::Address(structureGPR, Structure::globalObjectOffset())));

    fastPromisePath.link(&m_jit);
    auto butterfly = TrustedImmPtr(nullptr);
    if (node->isInternalPromise())
        emitAllocateJSObjectWithKnownSize<JSInternalPromise>(resultGPR, structureGPR, butterfly, scratch1GPR, scratch2GPR, slowCases, sizeof(JSInternalPromise));
    else
        emitAllocateJSObjectWithKnownSize<JSPromise>(resultGPR, structureGPR, butterfly, scratch1GPR, scratch2GPR, slowCases, sizeof(JSPromise));
    // Initialize the promise's internal fields: Pending status, no reactions.
    m_jit.storeTrustedValue(jsNumber(static_cast<unsigned>(JSPromise::Status::Pending)), CCallHelpers::Address(resultGPR, JSInternalFieldObjectImpl<>::offsetOfInternalField(static_cast<unsigned>(JSPromise::Field::Flags))));
    m_jit.storeTrustedValue(jsUndefined(), CCallHelpers::Address(resultGPR, JSInternalFieldObjectImpl<>::offsetOfInternalField(static_cast<unsigned>(JSPromise::Field::ReactionsOrResult))));
    m_jit.mutatorFence(m_jit.vm());

    addSlowPathGenerator(slowPathCall(slowCases, this, node->isInternalPromise() ? operationCreateInternalPromise : operationCreatePromise, resultGPR, TrustedImmPtr::weakPointer(m_jit.graph(), globalObject), calleeGPR));

    cellResult(resultGPR, node);
}
14185
14186
// Shared CreateXxx path for internal-field objects (generators etc.):
// validate the callee's cached InternalFunctionAllocationProfile structure
// (classInfo and global object must match JSClass), inline-allocate, and fill
// every internal field with JSClass::initialValues(). Unlike compileCreatePromise,
// there is no known-constructor shortcut here.
template<typename JSClass, typename Operation>
void SpeculativeJIT::compileCreateInternalFieldObject(Node* node, Operation operation)
{
    JSGlobalObject* globalObject = m_jit.globalObjectFor(node->origin.semantic);

    SpeculateCellOperand callee(this, node->child1());
    GPRTemporary result(this);
    GPRTemporary structure(this);
    GPRTemporary scratch1(this);
    GPRTemporary scratch2(this);

    GPRReg calleeGPR = callee.gpr();
    GPRReg resultGPR = result.gpr();
    GPRReg structureGPR = structure.gpr();
    GPRReg scratch1GPR = scratch1.gpr();
    GPRReg scratch2GPR = scratch2.gpr();
    // Rare data is only used to access the allocator & structure
    // We can avoid using an additional GPR this way
    GPRReg rareDataGPR = structureGPR;

    MacroAssembler::JumpList slowCases;

    slowCases.append(m_jit.branchIfNotFunction(calleeGPR));
    m_jit.loadPtr(JITCompiler::Address(calleeGPR, JSFunction::offsetOfExecutableOrRareData()), rareDataGPR);
    // Tagged field: tag bit clear means executable, not rare data.
    slowCases.append(m_jit.branchTestPtr(MacroAssembler::Zero, rareDataGPR, CCallHelpers::TrustedImm32(JSFunction::rareDataTag)));
    m_jit.loadPtr(JITCompiler::Address(rareDataGPR, FunctionRareData::offsetOfInternalFunctionAllocationProfile() + InternalFunctionAllocationProfile::offsetOfStructure() - JSFunction::rareDataTag), structureGPR);
    slowCases.append(m_jit.branchTestPtr(CCallHelpers::Zero, structureGPR));
    // Cached structure must be for JSClass and for this global object.
    m_jit.move(TrustedImmPtr(JSClass::info()), scratch1GPR);
    slowCases.append(m_jit.branchPtr(CCallHelpers::NotEqual, scratch1GPR, CCallHelpers::Address(structureGPR, Structure::classInfoOffset())));
    m_jit.move(TrustedImmPtr::weakPointer(m_jit.graph(), globalObject), scratch1GPR);
    slowCases.append(m_jit.branchPtr(CCallHelpers::NotEqual, scratch1GPR, CCallHelpers::Address(structureGPR, Structure::globalObjectOffset())));

    auto butterfly = TrustedImmPtr(nullptr);
    emitAllocateJSObjectWithKnownSize<JSClass>(resultGPR, structureGPR, butterfly, scratch1GPR, scratch2GPR, slowCases, sizeof(JSClass));
    auto initialValues = JSClass::initialValues();
    ASSERT(initialValues.size() == JSClass::numberOfInternalFields);
    for (unsigned index = 0; index < initialValues.size(); ++index)
        m_jit.storeTrustedValue(initialValues[index], CCallHelpers::Address(resultGPR, JSInternalFieldObjectImpl<>::offsetOfInternalField(index)));
    m_jit.mutatorFence(m_jit.vm());

    addSlowPathGenerator(slowPathCall(slowCases, this, operation, resultGPR, TrustedImmPtr::weakPointer(m_jit.graph(), globalObject), calleeGPR));

    cellResult(resultGPR, node);
}
14231
// CreateGenerator: JSGenerator instantiation via the shared internal-field-object path.
void SpeculativeJIT::compileCreateGenerator(Node* node)
{
    compileCreateInternalFieldObject<JSGenerator>(node, operationCreateGenerator);
}
14236
// CreateAsyncGenerator: JSAsyncGenerator instantiation via the shared internal-field-object path.
void SpeculativeJIT::compileCreateAsyncGenerator(Node* node)
{
    compileCreateInternalFieldObject<JSAsyncGenerator>(node, operationCreateAsyncGenerator);
}
14241
// NewObject: allocate an empty JSFinalObject with the node's structure.
// Inline-allocates when a suitable allocator exists for the required size;
// otherwise (or on allocation failure) calls operationNewObject.
void SpeculativeJIT::compileNewObject(Node* node)
{
    GPRTemporary result(this);
    GPRTemporary allocator(this);
    GPRTemporary scratch(this);

    GPRReg resultGPR = result.gpr();
    GPRReg allocatorGPR = allocator.gpr();
    GPRReg scratchGPR = scratch.gpr();

    MacroAssembler::JumpList slowPath;

    RegisteredStructure structure = node->structure();
    size_t allocationSize = JSFinalObject::allocationSize(structure->inlineCapacity());
    // AllocatorIfExists: may be null if no allocator covers this size, in
    // which case we go straight to the slow path.
    Allocator allocatorValue = allocatorForNonVirtualConcurrently<JSFinalObject>(vm(), allocationSize, AllocatorForMode::AllocatorIfExists);
    if (!allocatorValue)
        slowPath.append(m_jit.jump());
    else {
        auto butterfly = TrustedImmPtr(nullptr);
        emitAllocateJSObject(resultGPR, JITAllocator::constant(allocatorValue), allocatorGPR, TrustedImmPtr(structure), butterfly, scratchGPR, slowPath);
        // Clear inline storage, then fence before the concurrent GC can see it.
        m_jit.emitInitializeInlineStorage(resultGPR, structure->inlineCapacity());
        m_jit.mutatorFence(vm());
    }

    addSlowPathGenerator(slowPathCall(slowPath, this, operationNewObject, resultGPR, &vm(), structure));

    cellResult(resultGPR, node);
}
14270
// Shared fast path for allocating internal-field objects (generators,
// promises, iterators, ...). Inline-allocates a JSClass instance with the
// node's (frozen) structure and stores the class's statically-known initial
// values into each internal field. On allocation failure, falls back to the
// given slow-path operation.
template<typename JSClass, typename Operation>
void SpeculativeJIT::compileNewInternalFieldObjectImpl(Node* node, Operation operation)
{
    GPRTemporary result(this);
    GPRTemporary scratch1(this);
    GPRTemporary scratch2(this);

    GPRReg resultGPR = result.gpr();
    GPRReg scratch1GPR = scratch1.gpr();
    GPRReg scratch2GPR = scratch2.gpr();

    MacroAssembler::JumpList slowCases;

    // Freeze the structure so it stays alive and can be embedded as a
    // constant in the generated code.
    FrozenValue* structure = m_graph.freezeStrong(node->structure().get());
    auto butterfly = TrustedImmPtr(nullptr); // Internal-field objects start without a butterfly.
    emitAllocateJSObjectWithKnownSize<JSClass>(resultGPR, TrustedImmPtr(structure), butterfly, scratch1GPR, scratch2GPR, slowCases, sizeof(JSClass));
    auto initialValues = JSClass::initialValues();
    static_assert(initialValues.size() == JSClass::numberOfInternalFields);
    for (unsigned index = 0; index < initialValues.size(); ++index)
        m_jit.storeTrustedValue(initialValues[index], CCallHelpers::Address(resultGPR, JSInternalFieldObjectImpl<>::offsetOfInternalField(index)));
    // Fence so the initialized object is safely publishable (relevant under
    // concurrent GC).
    m_jit.mutatorFence(m_jit.vm());

    addSlowPathGenerator(slowPathCall(slowCases, this, operation, resultGPR, &vm(), TrustedImmPtr(structure)));

    cellResult(resultGPR, node);
}
14297
// NewGenerator: allocates a JSGenerator via the shared internal-field object
// fast path, with operationNewGenerator as the slow-path fallback.
void SpeculativeJIT::compileNewGenerator(Node* node)
{
    compileNewInternalFieldObjectImpl<JSGenerator>(node, operationNewGenerator);
}
14302
// NewAsyncGenerator: allocates a JSAsyncGenerator via the shared
// internal-field object fast path, with operationNewAsyncGenerator as the
// slow-path fallback.
void SpeculativeJIT::compileNewAsyncGenerator(Node* node)
{
    compileNewInternalFieldObjectImpl<JSAsyncGenerator>(node, operationNewAsyncGenerator);
}
14307
14308void SpeculativeJIT::compileNewInternalFieldObject(Node* node)
14309{
14310 switch (node->structure()->typeInfo().type()) {
14311 case JSArrayIteratorType:
14312 compileNewInternalFieldObjectImpl<JSArrayIterator>(node, operationNewArrayIterator);
14313 break;
14314 case JSMapIteratorType:
14315 compileNewInternalFieldObjectImpl<JSMapIterator>(node, operationNewMapIterator);
14316 break;
14317 case JSSetIteratorType:
14318 compileNewInternalFieldObjectImpl<JSSetIterator>(node, operationNewSetIterator);
14319 break;
14320 case JSPromiseType: {
14321 if (node->structure()->classInfo() == JSInternalPromise::info())
14322 compileNewInternalFieldObjectImpl<JSInternalPromise>(node, operationNewInternalPromise);
14323 else {
14324 ASSERT(node->structure()->classInfo() == JSPromise::info());
14325 compileNewInternalFieldObjectImpl<JSPromise>(node, operationNewPromise);
14326 }
14327 break;
14328 }
14329 default:
14330 DFG_CRASH(m_graph, node, "Bad structure");
14331 }
14332}
14333
// ToPrimitive: non-cells and non-object cells are already primitive and pass
// through unchanged; objects go to the operationToPrimitive slow path.
void SpeculativeJIT::compileToPrimitive(Node* node)
{
    DFG_ASSERT(m_jit.graph(), node, node->child1().useKind() == UntypedUse, node->child1().useKind());
    JSValueOperand argument(this, node->child1());
    JSValueRegsTemporary result(this, Reuse, argument);

    JSValueRegs argumentRegs = argument.jsValueRegs();
    JSValueRegs resultRegs = result.regs();

    // Use children explicitly so the result temporary can reuse the argument
    // registers.
    argument.use();

    MacroAssembler::Jump alreadyPrimitive = m_jit.branchIfNotCell(argumentRegs);
    MacroAssembler::Jump notPrimitive = m_jit.branchIfObject(argumentRegs.payloadGPR());

    // Fast path: non-cells and primitive cells (strings, symbols, bigints)
    // are returned as-is.
    alreadyPrimitive.link(&m_jit);
    m_jit.moveValueRegs(argumentRegs, resultRegs);

    addSlowPathGenerator(slowPathCall(notPrimitive, this, operationToPrimitive, resultRegs, TrustedImmPtr::weakPointer(m_graph, m_graph.globalObjectFor(node->origin.semantic)), argumentRegs));

    jsValueResult(resultRegs, node, DataFormatJS, UseChildrenCalledExplicitly);
}
14355
// ToPropertyKey: strings and symbols pass through unchanged; everything else
// (non-cells and non-string/non-symbol cells) goes to the
// operationToPropertyKey slow path. The result is always a cell.
void SpeculativeJIT::compileToPropertyKey(Node* node)
{
    DFG_ASSERT(m_jit.graph(), node, node->child1().useKind() == UntypedUse, node->child1().useKind());
    JSValueOperand argument(this, node->child1());
    JSValueRegsTemporary result(this, Reuse, argument);

    JSValueRegs argumentRegs = argument.jsValueRegs();
    JSValueRegs resultRegs = result.regs();

    // Use children explicitly so the result temporary can reuse the argument
    // registers.
    argument.use();

    MacroAssembler::JumpList slowCases;
    slowCases.append(m_jit.branchIfNotCell(argumentRegs));
    MacroAssembler::Jump alreadyPropertyKey = m_jit.branchIfSymbol(argumentRegs.payloadGPR());
    slowCases.append(m_jit.branchIfNotString(argumentRegs.payloadGPR()));

    // Fast path: the value is a symbol or a string; return it as-is.
    alreadyPropertyKey.link(&m_jit);
    m_jit.moveValueRegs(argumentRegs, resultRegs);

    addSlowPathGenerator(slowPathCall(slowCases, this, operationToPropertyKey, resultRegs, TrustedImmPtr::weakPointer(m_graph, m_graph.globalObjectFor(node->origin.semantic)), argumentRegs));

    jsValueResult(resultRegs, node, DataFormatJSCell, UseChildrenCalledExplicitly);
}
14379
// ToNumeric: numbers and heap bigints are already numeric and pass through
// unchanged; all other values go to the operationToNumeric slow path.
void SpeculativeJIT::compileToNumeric(Node* node)
{
    DFG_ASSERT(m_jit.graph(), node, node->child1().useKind() == UntypedUse, node->child1().useKind());
    JSValueOperand argument(this, node->child1());
    JSValueRegsTemporary result(this);
    GPRTemporary temp(this);

    JSValueRegs argumentRegs = argument.jsValueRegs();
    JSValueRegs resultRegs = result.regs();
    GPRReg scratch = temp.gpr();
    // FIXME: add a fast path for BigInt32 here.
    // https://bugs.webkit.org/show_bug.cgi?id=211064

    MacroAssembler::JumpList slowCases;

    // Cells: only heap bigints stay on the fast path.
    MacroAssembler::Jump notCell = m_jit.branchIfNotCell(argumentRegs);
    slowCases.append(m_jit.branchIfNotHeapBigInt(argumentRegs.payloadGPR()));
    MacroAssembler::Jump isHeapBigInt = m_jit.jump();

    // Non-cells: only numbers stay on the fast path.
    notCell.link(&m_jit);
    slowCases.append(m_jit.branchIfNotNumber(argumentRegs, scratch));

    isHeapBigInt.link(&m_jit);
    m_jit.moveValueRegs(argumentRegs, resultRegs);

    addSlowPathGenerator(slowPathCall(slowCases, this, operationToNumeric, resultRegs, TrustedImmPtr::weakPointer(m_graph, m_graph.globalObjectFor(node->origin.semantic)), argumentRegs));

    jsValueResult(resultRegs, node, DataFormatJS);
}
14409
// CallNumberConstructor (Number(x) called as a function): numbers pass
// through unchanged; other values go to operationCallNumberConstructor. With
// BigInt32 support, a speculated BigInt32 argument is unboxed directly to an
// int32 result.
void SpeculativeJIT::compileCallNumberConstructor(Node* node)
{
#if USE(BIGINT32)
    if (node->child1().useKind() == BigInt32Use) {
        SpeculateBigInt32Operand operand(this, node->child1());
        GPRTemporary result(this);

        GPRReg operandGPR = operand.gpr();
        GPRReg resultGPR = result.gpr();

        // Number(bigint32): extract the int32 payload from the boxed BigInt32.
        m_jit.unboxBigInt32(operandGPR, resultGPR);
        strictInt32Result(resultGPR, node);
        return;
    }
#endif

    DFG_ASSERT(m_jit.graph(), node, node->child1().useKind() == UntypedUse, node->child1().useKind());
    JSValueOperand argument(this, node->child1());
    JSValueRegsTemporary result(this);
    GPRTemporary temp(this);

    JSValueRegs argumentRegs = argument.jsValueRegs();
    JSValueRegs resultRegs = result.regs();
    GPRReg tempGPR = temp.gpr();
    // FIXME: add a fast path for BigInt32 here.
    // https://bugs.webkit.org/show_bug.cgi?id=211064

    // Fast path: already a number — return it unchanged.
    CCallHelpers::JumpList slowCases;
    slowCases.append(m_jit.branchIfNotNumber(argumentRegs, tempGPR));
    m_jit.moveValueRegs(argumentRegs, resultRegs);
    addSlowPathGenerator(slowPathCall(slowCases, this, operationCallNumberConstructor, resultRegs, TrustedImmPtr::weakPointer(m_graph, m_graph.globalObjectFor(node->origin.semantic)), argumentRegs));

    jsValueResult(resultRegs, node);
}
14444
// LogShadowChickenPrologue: acquires a ShadowChicken log packet and records a
// prologue packet containing the current scope. Registers are flushed first
// because ensureShadowChickenPacket may make an external call.
void SpeculativeJIT::compileLogShadowChickenPrologue(Node* node)
{
    flushRegisters();
    prepareForExternalCall();
    m_jit.emitStoreCodeOrigin(node->origin.semantic);

    GPRTemporary scratch1(this, GPRInfo::nonArgGPR0); // This must be a non-argument GPR.
    GPRReg scratch1Reg = scratch1.gpr();
    GPRTemporary scratch2(this);
    GPRReg scratch2Reg = scratch2.gpr();
    GPRTemporary shadowPacket(this);
    GPRReg shadowPacketReg = shadowPacket.gpr();

    m_jit.ensureShadowChickenPacket(vm(), shadowPacketReg, scratch1Reg, scratch2Reg);

    // The scope operand is acquired only after the packet is secured, so it
    // cannot be clobbered by the potential call above.
    SpeculateCellOperand scope(this, node->child1());
    GPRReg scopeReg = scope.gpr();

    m_jit.logShadowChickenProloguePacket(shadowPacketReg, scratch1Reg, scopeReg);
    noResult(node);
}
14466
// LogShadowChickenTail: acquires a ShadowChicken log packet and records a
// tail packet with |this|, the scope, the code block, and the call site.
// Registers are flushed first because ensureShadowChickenPacket may make an
// external call.
void SpeculativeJIT::compileLogShadowChickenTail(Node* node)
{
    flushRegisters();
    prepareForExternalCall();
    CallSiteIndex callSiteIndex = m_jit.emitStoreCodeOrigin(node->origin.semantic);

    GPRTemporary scratch1(this, GPRInfo::nonArgGPR0); // This must be a non-argument GPR.
    GPRReg scratch1Reg = scratch1.gpr();
    GPRTemporary scratch2(this);
    GPRReg scratch2Reg = scratch2.gpr();
    GPRTemporary shadowPacket(this);
    GPRReg shadowPacketReg = shadowPacket.gpr();

    m_jit.ensureShadowChickenPacket(vm(), shadowPacketReg, scratch1Reg, scratch2Reg);

    // Operands are acquired only after the packet is secured, so they cannot
    // be clobbered by the potential call above.
    JSValueOperand thisValue(this, node->child1());
    JSValueRegs thisRegs = thisValue.jsValueRegs();
    SpeculateCellOperand scope(this, node->child2());
    GPRReg scopeReg = scope.gpr();

    m_jit.logShadowChickenTailPacket(shadowPacketReg, thisRegs, scopeReg, m_jit.codeBlock(), callSiteIndex);
    noResult(node);
}
14490
// SetAdd: adds (key, hash) to a JSSet. Speculates that child1 is a Set
// object, then calls out to operationSetAdd; there is no inline fast path.
// The result cell is the bucket returned by the operation.
void SpeculativeJIT::compileSetAdd(Node* node)
{
    SpeculateCellOperand set(this, node->child1());
    JSValueOperand key(this, node->child2());
    SpeculateInt32Operand hash(this, node->child3());

    GPRReg setGPR = set.gpr();
    JSValueRegs keyRegs = key.jsValueRegs();
    GPRReg hashGPR = hash.gpr();

    speculateSetObject(node->child1(), setGPR);

    flushRegisters();
    GPRFlushedCallResult result(this);
    GPRReg resultGPR = result.gpr();
    callOperation(operationSetAdd, resultGPR, TrustedImmPtr::weakPointer(m_graph, m_graph.globalObjectFor(node->origin.semantic)), setGPR, keyRegs, hashGPR);
    m_jit.exceptionCheck();
    cellResult(resultGPR, node);
}
14510
// MapSet: stores (key -> value) with the given hash into a JSMap. Speculates
// that the first vararg child is a Map object, then calls out to
// operationMapSet; there is no inline fast path. The result cell is the
// bucket returned by the operation.
void SpeculativeJIT::compileMapSet(Node* node)
{
    SpeculateCellOperand map(this, m_jit.graph().varArgChild(node, 0));
    JSValueOperand key(this, m_jit.graph().varArgChild(node, 1));
    JSValueOperand value(this, m_jit.graph().varArgChild(node, 2));
    SpeculateInt32Operand hash(this, m_jit.graph().varArgChild(node, 3));

    GPRReg mapGPR = map.gpr();
    JSValueRegs keyRegs = key.jsValueRegs();
    JSValueRegs valueRegs = value.jsValueRegs();
    GPRReg hashGPR = hash.gpr();

    speculateMapObject(m_jit.graph().varArgChild(node, 0), mapGPR);

    flushRegisters();
    GPRFlushedCallResult result(this);
    GPRReg resultGPR = result.gpr();
    callOperation(operationMapSet, resultGPR, TrustedImmPtr::weakPointer(m_graph, m_graph.globalObjectFor(node->origin.semantic)), mapGPR, keyRegs, valueRegs, hashGPR);
    m_jit.exceptionCheck();
    cellResult(resultGPR, node);
}
14532
// WeakMapGet: inline lookup in a WeakMap or WeakSet hash table. The table is
// open-addressed with linear probing: index starts at the key's hash, is
// masked by (capacity - 1), and advances by one per miss. The loop exits when
// the bucket's key equals the probe key (found) or is null (not present).
// For WeakMap the result is the bucket's value; for WeakSet it is the key
// itself. The loop cannot spin forever only if the table always contains an
// empty bucket — NOTE(review): this relies on the runtime's load-factor
// invariant; confirm against WeakMapImpl.
void SpeculativeJIT::compileWeakMapGet(Node* node)
{
    GPRTemporary mask(this);
    GPRTemporary buffer(this);
    JSValueRegsTemporary result(this);

    GPRReg maskGPR = mask.gpr();
    GPRReg bufferGPR = buffer.gpr();
    JSValueRegs resultRegs = result.regs();

    // Scoped so the hash operand is released once its value is copied into
    // the reusable index temporary.
    GPRTemporary index;
    GPRReg indexGPR { InvalidGPRReg };
    {
        SpeculateInt32Operand hash(this, node->child3());
        GPRReg hashGPR = hash.gpr();
        index = GPRTemporary(this, Reuse, hash);
        indexGPR = index.gpr();
        m_jit.move(hashGPR, indexGPR);
    }

    // Scoped so the map/set operand is released after capacity and buffer are
    // loaded; the probe loop only needs those two.
    {
        SpeculateCellOperand weakMap(this, node->child1());
        GPRReg weakMapGPR = weakMap.gpr();
        if (node->child1().useKind() == WeakMapObjectUse)
            speculateWeakMapObject(node->child1(), weakMapGPR);
        else
            speculateWeakSetObject(node->child1(), weakMapGPR);

        // Both bucket layouts share the same capacity/buffer offsets, so the
        // key-only accessors are safe for either container.
        ASSERT(WeakMapImpl<WeakMapBucket<WeakMapBucketDataKey>>::offsetOfCapacity() == WeakMapImpl<WeakMapBucket<WeakMapBucketDataKeyValue>>::offsetOfCapacity());
        ASSERT(WeakMapImpl<WeakMapBucket<WeakMapBucketDataKey>>::offsetOfBuffer() == WeakMapImpl<WeakMapBucket<WeakMapBucketDataKeyValue>>::offsetOfBuffer());
        m_jit.load32(MacroAssembler::Address(weakMapGPR, WeakMapImpl<WeakMapBucket<WeakMapBucketDataKey>>::offsetOfCapacity()), maskGPR);
        m_jit.loadPtr(MacroAssembler::Address(weakMapGPR, WeakMapImpl<WeakMapBucket<WeakMapBucketDataKey>>::offsetOfBuffer()), bufferGPR);
    }

    SpeculateCellOperand key(this, node->child2());
    GPRReg keyGPR = key.gpr();
    speculateObject(node->child2(), keyGPR);

#if USE(JSVALUE32_64)
    // On 32-bit, borrow the result tag register as the bucket pointer; it is
    // overwritten with a real tag before the result is produced.
    GPRReg bucketGPR = resultRegs.tagGPR();
#else
    GPRTemporary bucket(this);
    GPRReg bucketGPR = bucket.gpr();
#endif

    // Capacity is a power of two, so (capacity - 1) is the probe mask —
    // presumably guaranteed by WeakMapImpl; verify if changing this.
    m_jit.sub32(TrustedImm32(1), maskGPR);

    MacroAssembler::Label loop = m_jit.label();
    m_jit.and32(maskGPR, indexGPR);
    // Compute the bucket address: buffer + index * sizeof(bucket).
    if (node->child1().useKind() == WeakSetObjectUse) {
        static_assert(sizeof(WeakMapBucket<WeakMapBucketDataKey>) == sizeof(void*), "");
        m_jit.zeroExtend32ToWord(indexGPR, bucketGPR);
        m_jit.lshiftPtr(MacroAssembler::Imm32(sizeof(void*) == 4 ? 2 : 3), bucketGPR);
        m_jit.addPtr(bufferGPR, bucketGPR);
    } else {
        ASSERT(node->child1().useKind() == WeakMapObjectUse);
        static_assert(sizeof(WeakMapBucket<WeakMapBucketDataKeyValue>) == 16, "");
        m_jit.zeroExtend32ToWord(indexGPR, bucketGPR);
        m_jit.lshiftPtr(MacroAssembler::Imm32(4), bucketGPR);
        m_jit.addPtr(bufferGPR, bucketGPR);
    }

    m_jit.loadPtr(MacroAssembler::Address(bucketGPR, WeakMapBucket<WeakMapBucketDataKeyValue>::offsetOfKey()), resultRegs.payloadGPR());

    // They're definitely the same value, we found the bucket we were looking for!
    // The deleted key comparison is also done with this.
    auto found = m_jit.branchPtr(MacroAssembler::Equal, resultRegs.payloadGPR(), keyGPR);

    // A null key means an empty bucket: the key is not in the table.
    auto notPresentInTable = m_jit.branchTestPtr(MacroAssembler::Zero, resultRegs.payloadGPR());

    // Miss on an occupied bucket: linear-probe to the next slot.
    m_jit.add32(TrustedImm32(1), indexGPR);
    m_jit.jump().linkTo(loop, &m_jit);

#if USE(JSVALUE32_64)
    notPresentInTable.link(&m_jit);
    m_jit.moveValue(JSValue(), resultRegs);
    auto notPresentInTableDone = m_jit.jump();

    found.link(&m_jit);
    // WeakSet: the result is the key itself (already in the payload GPR), so
    // only the cell tag needs to be installed.
    if (node->child1().useKind() == WeakSetObjectUse)
        m_jit.move(TrustedImm32(JSValue::CellTag), resultRegs.tagGPR());
    else
        m_jit.loadValue(MacroAssembler::Address(bucketGPR, WeakMapBucket<WeakMapBucketDataKeyValue>::offsetOfValue()), resultRegs);

    notPresentInTableDone.link(&m_jit);
#else
    notPresentInTable.link(&m_jit);
    found.link(&m_jit);

    // In 64bit environment, Empty bucket has JSEmpty value. Empty key is JSEmpty.
    // If empty bucket is found, we can use the same path used for the case of finding a bucket.
    if (node->child1().useKind() == WeakMapObjectUse)
        m_jit.loadValue(MacroAssembler::Address(bucketGPR, WeakMapBucket<WeakMapBucketDataKeyValue>::offsetOfValue()), resultRegs);
#endif

    jsValueResult(resultRegs, node);
}
14630
// WeakSetAdd: adds an object key (with its precomputed hash) to a JSWeakSet.
// Speculates the operand kinds, then calls out to operationWeakSetAdd; there
// is no inline fast path and no result value.
void SpeculativeJIT::compileWeakSetAdd(Node* node)
{
    SpeculateCellOperand set(this, node->child1());
    SpeculateCellOperand key(this, node->child2());
    SpeculateInt32Operand hash(this, node->child3());

    GPRReg setGPR = set.gpr();
    GPRReg keyGPR = key.gpr();
    GPRReg hashGPR = hash.gpr();

    speculateWeakSetObject(node->child1(), setGPR);
    speculateObject(node->child2(), keyGPR);

    flushRegisters();
    callOperation(operationWeakSetAdd, &vm(), setGPR, keyGPR, hashGPR);
    m_jit.exceptionCheck();
    noResult(node);
}
14649
// WeakMapSet: stores (object key -> value) with the given hash into a
// JSWeakMap. Speculates the operand kinds, then calls out to
// operationWeakMapSet; there is no inline fast path and no result value.
void SpeculativeJIT::compileWeakMapSet(Node* node)
{
    SpeculateCellOperand map(this, m_jit.graph().varArgChild(node, 0));
    SpeculateCellOperand key(this, m_jit.graph().varArgChild(node, 1));
    JSValueOperand value(this, m_jit.graph().varArgChild(node, 2));
    SpeculateInt32Operand hash(this, m_jit.graph().varArgChild(node, 3));

    GPRReg mapGPR = map.gpr();
    GPRReg keyGPR = key.gpr();
    JSValueRegs valueRegs = value.jsValueRegs();
    GPRReg hashGPR = hash.gpr();

    speculateWeakMapObject(m_jit.graph().varArgChild(node, 0), mapGPR);
    speculateObject(m_jit.graph().varArgChild(node, 1), keyGPR);

    flushRegisters();
    callOperation(operationWeakMapSet, &vm(), mapGPR, keyGPR, valueRegs, hashGPR);
    m_jit.exceptionCheck();
    noResult(node);
}
14670
// GetPrototypeOf: loads an object's [[Prototype]]. For speculated
// array/function/final-object inputs the prototype is read straight from the
// structure (mono proto) or from the object's inline storage at
// knownPolyProtoOffset (poly proto); abstract-interpreter state is used to
// statically prove which of the two applies when possible. Object and
// untyped inputs use emitLoadPrototype with a slow-path call fallback.
void SpeculativeJIT::compileGetPrototypeOf(Node* node)
{
    GPRTemporary temp(this);
    GPRTemporary temp2(this);

    GPRReg tempGPR = temp.gpr();
    GPRReg temp2GPR = temp2.gpr();

#if USE(JSVALUE64)
    JSValueRegs resultRegs(tempGPR);
#else
    JSValueRegs resultRegs(temp2GPR, tempGPR);
#endif

    switch (node->child1().useKind()) {
    case ArrayUse:
    case FunctionUse:
    case FinalObjectUse: {
        SpeculateCellOperand object(this, node->child1());
        GPRReg objectGPR = object.gpr();

        switch (node->child1().useKind()) {
        case ArrayUse:
            speculateArray(node->child1(), objectGPR);
            break;
        case FunctionUse:
            speculateFunction(node->child1(), objectGPR);
            break;
        case FinalObjectUse:
            speculateFinalObject(node->child1(), objectGPR);
            break;
        default:
            RELEASE_ASSERT_NOT_REACHED();
            break;
        }

        m_jit.emitLoadStructure(vm(), objectGPR, tempGPR, temp2GPR);

        // If abstract interpretation proved a finite set of object
        // structures, check whether they are uniformly mono-proto or
        // uniformly poly-proto, which lets us emit a single unconditional
        // load.
        AbstractValue& value = m_state.forNode(node->child1());
        if ((value.m_type && !(value.m_type & ~SpecObject)) && value.m_structure.isFinite()) {
            bool hasPolyProto = false;
            bool hasMonoProto = false;
            value.m_structure.forEach([&] (RegisteredStructure structure) {
                if (structure->hasPolyProto())
                    hasPolyProto = true;
                else
                    hasMonoProto = true;
            });

            // All mono-proto: the prototype lives in the structure.
            if (hasMonoProto && !hasPolyProto) {
                m_jit.loadValue(MacroAssembler::Address(tempGPR, Structure::prototypeOffset()), resultRegs);
                jsValueResult(resultRegs, node);
                return;
            }

            // All poly-proto: the prototype lives in the object's inline
            // storage at the known poly-proto offset.
            if (hasPolyProto && !hasMonoProto) {
                m_jit.loadValue(JITCompiler::Address(objectGPR, offsetRelativeToBase(knownPolyProtoOffset)), resultRegs);
                jsValueResult(resultRegs, node);
                return;
            }
        }

        // Mixed or unknown: try the structure's prototype first; an empty
        // value there signals poly proto, so fall back to the inline slot.
        m_jit.loadValue(MacroAssembler::Address(tempGPR, Structure::prototypeOffset()), resultRegs);
        auto hasMonoProto = m_jit.branchIfNotEmpty(resultRegs);
        m_jit.loadValue(JITCompiler::Address(objectGPR, offsetRelativeToBase(knownPolyProtoOffset)), resultRegs);
        hasMonoProto.link(&m_jit);
        jsValueResult(resultRegs, node);
        return;
    }
    case ObjectUse: {
        SpeculateCellOperand object(this, node->child1());
        GPRReg objectGPR = object.gpr();
        speculateObject(node->child1(), objectGPR);

        JITCompiler::JumpList slowCases;
        m_jit.emitLoadPrototype(vm(), objectGPR, resultRegs, temp2GPR, slowCases);
        addSlowPathGenerator(slowPathCall(slowCases, this, operationGetPrototypeOfObject,
            resultRegs, TrustedImmPtr::weakPointer(m_graph, m_graph.globalObjectFor(node->origin.semantic)), objectGPR));

        jsValueResult(resultRegs, node);
        return;
    }
    default: {
        // Untyped input: non-cells and non-objects go straight to the slow
        // path, which handles ToObject semantics and exotic objects.
        JSValueOperand value(this, node->child1());
        JSValueRegs valueRegs = value.jsValueRegs();

        JITCompiler::JumpList slowCases;
        slowCases.append(m_jit.branchIfNotCell(valueRegs));

        GPRReg valueGPR = valueRegs.payloadGPR();
        slowCases.append(m_jit.branchIfNotObject(valueGPR));

        m_jit.emitLoadPrototype(vm(), valueGPR, resultRegs, temp2GPR, slowCases);
        addSlowPathGenerator(slowPathCall(slowCases, this, operationGetPrototypeOf,
            resultRegs, TrustedImmPtr::weakPointer(m_graph, m_graph.globalObjectFor(node->origin.semantic)), valueRegs));

        jsValueResult(resultRegs, node);
        return;
    }
    }
}
14772
// Identity: applies the edge's speculation checks and forwards the input
// value unchanged, in a representation matching the use kind (double, Int52,
// or boxed JSValue).
void SpeculativeJIT::compileIdentity(Node* node)
{
    speculate(node, node->child1());
    switch (node->child1().useKind()) {
#if USE(JSVALUE64)
    case DoubleRepAnyIntUse:
#endif
    case DoubleRepUse:
    case DoubleRepRealUse: {
        SpeculateDoubleOperand op(this, node->child1());
        FPRTemporary scratch(this, op);
        m_jit.moveDouble(op.fpr(), scratch.fpr());
        doubleResult(scratch.fpr(), node);
        break;
    }
#if USE(JSVALUE64)
    case Int52RepUse: {
        SpeculateInt52Operand op(this, node->child1());
        GPRTemporary result(this, Reuse, op);
        m_jit.move(op.gpr(), result.gpr());
        int52Result(result.gpr(), node);
        break;
    }
#endif
    default: {
        // All other use kinds: copy the boxed value as-is. Speculation was
        // already applied above, hence ManualOperandSpeculation here.
        JSValueOperand op(this, node->child1(), ManualOperandSpeculation);
        JSValueRegsTemporary result(this, Reuse, op);
        JSValueRegs opRegs = op.jsValueRegs();
        JSValueRegs resultRegs = result.regs();
        m_jit.moveValueRegs(opRegs, resultRegs);
        jsValueResult(resultRegs, node);
        break;
    }
    }
}
14808
// MiscStrictEq: strict equality where at least one side is speculated to be a
// "misc" value (booleans, null, undefined, ...). For such values strict
// equality reduces to bitwise equality of the encoded JSValue: a single
// 64-bit compare on JSVALUE64, or tag-then-payload compares on JSVALUE32_64.
void SpeculativeJIT::compileMiscStrictEq(Node* node)
{
    JSValueOperand op1(this, node->child1(), ManualOperandSpeculation);
    JSValueOperand op2(this, node->child2(), ManualOperandSpeculation);
    GPRTemporary result(this);

    if (node->child1().useKind() == MiscUse)
        speculateMisc(node->child1(), op1.jsValueRegs());
    if (node->child2().useKind() == MiscUse)
        speculateMisc(node->child2(), op2.jsValueRegs());

#if USE(JSVALUE64)
    m_jit.compare64(JITCompiler::Equal, op1.gpr(), op2.gpr(), result.gpr());
#else
    // 32-bit: values are equal iff both the tags and the payloads match.
    m_jit.move(TrustedImm32(0), result.gpr());
    JITCompiler::Jump notEqual = m_jit.branch32(JITCompiler::NotEqual, op1.tagGPR(), op2.tagGPR());
    m_jit.compare32(JITCompiler::Equal, op1.payloadGPR(), op2.payloadGPR(), result.gpr());
    notEqual.link(&m_jit);
#endif
    unblessedBooleanResult(result.gpr(), node);
}
14830
// Fills the first sizeGPR slots of a butterfly's indexed storage with
// emptyValueRegs, iterating from the last slot down to slot 0. scratchGPR is
// used as the countdown index; a zero size skips the loop entirely.
void SpeculativeJIT::emitInitializeButterfly(GPRReg storageGPR, GPRReg sizeGPR, JSValueRegs emptyValueRegs, GPRReg scratchGPR)
{
    m_jit.zeroExtend32ToWord(sizeGPR, scratchGPR);
    MacroAssembler::Jump done = m_jit.branchTest32(MacroAssembler::Zero, scratchGPR);
    MacroAssembler::Label loop = m_jit.label();
    m_jit.sub32(TrustedImm32(1), scratchGPR);
    m_jit.storeValue(emptyValueRegs, MacroAssembler::BaseIndex(storageGPR, scratchGPR, MacroAssembler::TimesEight));
    m_jit.branchTest32(MacroAssembler::NonZero, scratchGPR).linkTo(loop, &m_jit);
    done.link(&m_jit);
}
14841
// Inline-allocates a JSArray of dynamic length sizeGPR with the requested
// indexing type: allocates the butterfly, fills it with the hole value (PNaN
// for double arrays, JSEmpty otherwise), then allocates the JSArray cell
// itself. Sizes at or above MIN_ARRAY_STORAGE_CONSTRUCTION_LENGTH (when
// shouldConvertLargeSizeToArrayStorage is set) and any allocation failure go
// to the operationNewArrayWithSize slow path.
void SpeculativeJIT::compileAllocateNewArrayWithSize(JSGlobalObject* globalObject, GPRReg resultGPR, GPRReg sizeGPR, IndexingType indexingType, bool shouldConvertLargeSizeToArrayStorage)
{
    GPRTemporary storage(this);
    GPRTemporary scratch(this);
    GPRTemporary scratch2(this);

    GPRReg storageGPR = storage.gpr();
    GPRReg scratchGPR = scratch.gpr();
    GPRReg scratch2GPR = scratch2.gpr();

    // Null the storage register first so the slow path can tell whether a
    // butterfly was already allocated when it is entered.
    m_jit.move(TrustedImmPtr(nullptr), storageGPR);

    MacroAssembler::JumpList slowCases;
    if (shouldConvertLargeSizeToArrayStorage)
        slowCases.append(m_jit.branch32(MacroAssembler::AboveOrEqual, sizeGPR, TrustedImm32(MIN_ARRAY_STORAGE_CONSTRUCTION_LENGTH)));
#if ASSERT_ENABLED
    else {
        // Callers that opt out promise the size is already below the
        // array-storage threshold; verify that in debug builds.
        MacroAssembler::Jump lengthIsWithinLimits;
        lengthIsWithinLimits = m_jit.branch32(MacroAssembler::Below, sizeGPR, TrustedImm32(MIN_ARRAY_STORAGE_CONSTRUCTION_LENGTH));
        m_jit.abortWithReason(UncheckedOverflow);
        lengthIsWithinLimits.link(&m_jit);
    }
#endif // ASSERT_ENABLED

    // We can use resultGPR as a scratch right now.
    emitAllocateButterfly(storageGPR, sizeGPR, scratchGPR, scratch2GPR, resultGPR, slowCases);

    // Pick the hole value used to pre-fill the new storage.
#if USE(JSVALUE64)
    JSValueRegs emptyValueRegs(scratchGPR);
    if (hasDouble(indexingType))
        m_jit.move(TrustedImm64(bitwise_cast<int64_t>(PNaN)), emptyValueRegs.gpr());
    else
        m_jit.move(TrustedImm64(JSValue::encode(JSValue())), emptyValueRegs.gpr());
#else
    JSValueRegs emptyValueRegs(scratchGPR, scratch2GPR);
    if (hasDouble(indexingType))
        m_jit.moveValue(JSValue(JSValue::EncodeAsDouble, PNaN), emptyValueRegs);
    else
        m_jit.moveValue(JSValue(), emptyValueRegs);
#endif
    emitInitializeButterfly(storageGPR, sizeGPR, emptyValueRegs, resultGPR);

    RegisteredStructure structure = m_jit.graph().registerStructure(globalObject->arrayStructureForIndexingTypeDuringAllocation(indexingType));

    emitAllocateJSObject<JSArray>(resultGPR, TrustedImmPtr(structure), storageGPR, scratchGPR, scratch2GPR, slowCases);

    // Fence so the initialized array is safely publishable (relevant under
    // concurrent GC).
    m_jit.mutatorFence(vm());

    // The slow path receives both candidate structures: the requested one and
    // the array-storage one for over-threshold lengths.
    addSlowPathGenerator(makeUnique<CallArrayAllocatorWithVariableSizeSlowPathGenerator>(
        slowCases, this, operationNewArrayWithSize, resultGPR,
        TrustedImmPtr::weakPointer(m_graph, globalObject),
        structure,
        shouldConvertLargeSizeToArrayStorage ? m_jit.graph().registerStructure(globalObject->arrayStructureForIndexingTypeDuringAllocation(ArrayWithArrayStorage)) : structure,
        sizeGPR, storageGPR));
}
14897
14898void SpeculativeJIT::compileHasIndexedProperty(Node* node, S_JITOperation_GCZ slowPathOperation)
14899{
14900 SpeculateCellOperand base(this, m_graph.varArgChild(node, 0));
14901 SpeculateStrictInt32Operand index(this, m_graph.varArgChild(node, 1));
14902 GPRTemporary result(this);
14903
14904 GPRReg baseGPR = base.gpr();
14905 GPRReg indexGPR = index.gpr();
14906 GPRReg resultGPR = result.gpr();
14907
14908 MacroAssembler::JumpList slowCases;
14909 ArrayMode mode = node->arrayMode();
14910 switch (mode.type()) {
14911 case Array::Int32:
14912 case Array::Contiguous: {
14913 ASSERT(!!m_graph.varArgChild(node, 2));
14914 StorageOperand storage(this, m_graph.varArgChild(node, 2));
14915 GPRTemporary scratch(this);
14916
14917 GPRReg storageGPR = storage.gpr();
14918 GPRReg scratchGPR = scratch.gpr();
14919
14920 MacroAssembler::Jump outOfBounds = m_jit.branch32(MacroAssembler::AboveOrEqual, indexGPR, MacroAssembler::Address(storageGPR, Butterfly::offsetOfPublicLength()));
14921
14922 if (mode.isInBounds())
14923 speculationCheck(OutOfBounds, JSValueRegs(), nullptr, outOfBounds);
14924 else
14925 slowCases.append(outOfBounds);
14926
14927#if USE(JSVALUE64)
14928 m_jit.load64(MacroAssembler::BaseIndex(storageGPR, indexGPR, MacroAssembler::TimesEight), scratchGPR);
14929#else
14930 m_jit.load32(MacroAssembler::BaseIndex(storageGPR, indexGPR, MacroAssembler::TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.tag)), scratchGPR);
14931#endif
14932
14933 if (mode.isInBoundsSaneChain()) {
14934 m_jit.isNotEmpty(scratchGPR, resultGPR);
14935 break;
14936 }
14937
14938 MacroAssembler::Jump isHole = m_jit.branchIfEmpty(scratchGPR);
14939 if (!mode.isInBounds())
14940 slowCases.append(isHole);
14941 else
14942 speculationCheck(LoadFromHole, JSValueRegs(), nullptr, isHole);
14943 m_jit.move(TrustedImm32(1), resultGPR);
14944 break;
14945 }
14946 case Array::Double: {
14947 ASSERT(!!m_graph.varArgChild(node, 2));
14948 StorageOperand storage(this, m_graph.varArgChild(node, 2));
14949 FPRTemporary scratch(this);
14950 FPRReg scratchFPR = scratch.fpr();
14951 GPRReg storageGPR = storage.gpr();
14952
14953 MacroAssembler::Jump outOfBounds = m_jit.branch32(MacroAssembler::AboveOrEqual, indexGPR, MacroAssembler::Address(storageGPR, Butterfly::offsetOfPublicLength()));
14954
14955 if (mode.isInBounds())
14956 speculationCheck(OutOfBounds, JSValueRegs(), nullptr, outOfBounds);
14957 else
14958 slowCases.append(outOfBounds);
14959
14960 m_jit.loadDouble(MacroAssembler::BaseIndex(storageGPR, indexGPR, MacroAssembler::TimesEight), scratchFPR);
14961
14962 if (mode.isInBoundsSaneChain()) {
14963 m_jit.compareDouble(MacroAssembler::DoubleEqualAndOrdered, scratchFPR, scratchFPR, resultGPR);
14964 break;
14965 }
14966
14967 MacroAssembler::Jump isHole = m_jit.branchIfNaN(scratchFPR);
14968 if (!mode.isInBounds())
14969 slowCases.append(isHole);
14970 else
14971 speculationCheck(LoadFromHole, JSValueRegs(), nullptr, isHole);
14972 m_jit.move(TrustedImm32(1), resultGPR);
14973 break;
14974 }
14975 case Array::ArrayStorage: {
14976 ASSERT(!!m_graph.varArgChild(node, 2));
14977 StorageOperand storage(this, m_graph.varArgChild(node, 2));
14978 GPRTemporary scratch(this);
14979
14980 GPRReg storageGPR = storage.gpr();
14981 GPRReg scratchGPR = scratch.gpr();
14982
14983 MacroAssembler::Jump outOfBounds = m_jit.branch32(MacroAssembler::AboveOrEqual, indexGPR, MacroAssembler::Address(storageGPR, ArrayStorage::vectorLengthOffset()));
14984 if (mode.isInBounds())
14985 speculationCheck(OutOfBounds, JSValueRegs(), nullptr, outOfBounds);
14986 else
14987 slowCases.append(outOfBounds);
14988
14989#if USE(JSVALUE64)
14990 m_jit.load64(MacroAssembler::BaseIndex(storageGPR, indexGPR, MacroAssembler::TimesEight, ArrayStorage::vectorOffset()), scratchGPR);
14991#else
14992 m_jit.load32(MacroAssembler::BaseIndex(storageGPR, indexGPR, MacroAssembler::TimesEight, ArrayStorage::vectorOffset() + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), scratchGPR);
14993#endif
14994
14995 if (mode.isInBoundsSaneChain()) {
14996 m_jit.isNotEmpty(scratchGPR, resultGPR);
14997 break;
14998 }
14999
15000 MacroAssembler::Jump isHole = m_jit.branchIfEmpty(scratchGPR);
15001 if (!mode.isInBounds() || mode.isInBoundsSaneChain())
15002 slowCases.append(isHole);
15003 else
15004 speculationCheck(LoadFromHole, JSValueRegs(), nullptr, isHole);
15005 m_jit.move(TrustedImm32(1), resultGPR);
15006 break;
15007 }
15008 default: {
15009 // FIXME: Optimize TypedArrays in HasIndexedProperty IC
15010 // https://bugs.webkit.org/show_bug.cgi?id=221183
15011 slowCases.append(m_jit.jump());
15012 break;
15013 }
15014 }
15015
15016 addSlowPathGenerator(slowPathCall(slowCases, this, slowPathOperation, resultGPR, TrustedImmPtr::weakPointer(m_graph, m_graph.globalObjectFor(node->origin.semantic)), baseGPR, indexGPR));
15017
15018 unblessedBooleanResult(resultGPR, node);
15019}
15020
// GetDirectPname: fast property load during for-in enumeration. If the base
// still has the structure cached by the property-name enumerator, the value
// is read directly by index — from inline storage when the index is below the
// cached inline capacity, otherwise from the butterfly's out-of-line
// properties. On structure mismatch, falls back to a generic
// operationGetByValCell lookup by property name.
void SpeculativeJIT::compileGetDirectPname(Node* node)
{
    Edge& baseEdge = m_jit.graph().varArgChild(node, 0);
    Edge& propertyEdge = m_jit.graph().varArgChild(node, 1);
    Edge& indexEdge = m_jit.graph().varArgChild(node, 2);

    SpeculateCellOperand base(this, baseEdge);
    SpeculateCellOperand property(this, propertyEdge);
    GPRReg baseGPR = base.gpr();
    GPRReg propertyGPR = property.gpr();

    Edge& enumeratorEdge = m_jit.graph().varArgChild(node, 3);
    SpeculateStrictInt32Operand index(this, indexEdge);
    SpeculateCellOperand enumerator(this, enumeratorEdge);
    GPRTemporary scratch(this);
    JSValueRegsTemporary result(this);

    GPRReg indexGPR = index.gpr();
    GPRReg enumeratorGPR = enumerator.gpr();
    GPRReg scratchGPR = scratch.gpr();
    JSValueRegs resultRegs = result.regs();

    MacroAssembler::JumpList slowPath;

    // Check the structure
    m_jit.load32(MacroAssembler::Address(baseGPR, JSCell::structureIDOffset()), scratchGPR);
    slowPath.append(
        m_jit.branch32(
            MacroAssembler::NotEqual,
            scratchGPR,
            MacroAssembler::Address(
                enumeratorGPR, JSPropertyNameEnumerator::cachedStructureIDOffset())));

    // Compute the offset
    // If index is less than the enumerator's cached inline storage, then it's an inline access
    MacroAssembler::Jump outOfLineAccess = m_jit.branch32(MacroAssembler::AboveOrEqual,
        indexGPR, MacroAssembler::Address(enumeratorGPR, JSPropertyNameEnumerator::cachedInlineCapacityOffset()));

    m_jit.loadValue(MacroAssembler::BaseIndex(baseGPR, indexGPR, MacroAssembler::TimesEight, JSObject::offsetOfInlineStorage()), resultRegs);

    MacroAssembler::Jump done = m_jit.jump();

    // Otherwise it's out of line
    outOfLineAccess.link(&m_jit);
    m_jit.loadPtr(MacroAssembler::Address(baseGPR, JSObject::butterflyOffset()), resultRegs.payloadGPR());
    // Out-of-line properties are indexed negatively from the butterfly:
    // compute -(index - inlineCapacity) and bias by the first out-of-line
    // offset below.
    m_jit.move(indexGPR, scratchGPR);
    m_jit.sub32(MacroAssembler::Address(enumeratorGPR, JSPropertyNameEnumerator::cachedInlineCapacityOffset()), scratchGPR);
    m_jit.neg32(scratchGPR);
    m_jit.signExtend32ToPtr(scratchGPR, scratchGPR);
    int32_t offsetOfFirstProperty = static_cast<int32_t>(offsetInButterfly(firstOutOfLineOffset)) * sizeof(EncodedJSValue);
    m_jit.loadValue(MacroAssembler::BaseIndex(resultRegs.payloadGPR(), scratchGPR, MacroAssembler::TimesEight, offsetOfFirstProperty), resultRegs);

    done.link(&m_jit);

    addSlowPathGenerator(slowPathCall(slowPath, this, operationGetByValCell, resultRegs, TrustedImmPtr::weakPointer(m_graph, m_graph.globalObjectFor(node->origin.semantic)), baseGPR, CCallHelpers::CellValue(propertyGPR)));

    jsValueResult(resultRegs, node);
}
15079
// ExtractCatchLocal: reloads a value that was stashed in the catch OSR entry
// scratch buffer at this node's entry index. The buffer's address is known
// at compile time, so the slot's address is baked directly into the emitted
// code.
void SpeculativeJIT::compileExtractCatchLocal(Node* node)
{
    JSValueRegsTemporary result(this);
    JSValueRegs resultRegs = result.regs();

    // Compute the absolute address of the slot now; loadValue embeds it as
    // an immediate.
    JSValue* ptr = &reinterpret_cast<JSValue*>(m_jit.jitCode()->common.catchOSREntryBuffer->dataBuffer())[node->catchOSREntryIndex()];
    m_jit.loadValue(ptr, resultRegs);
    jsValueResult(resultRegs, node);
}
15089
// ClearCatchLocals: deactivates the catch OSR entry scratch buffer by
// storing null into its active-length slot. The slot's address is known at
// compile time and is materialized as an immediate.
void SpeculativeJIT::compileClearCatchLocals(Node* node)
{
    ScratchBuffer* scratchBuffer = m_jit.jitCode()->common.catchOSREntryBuffer;
    ASSERT(scratchBuffer);
    GPRTemporary scratch(this);
    GPRReg scratchGPR = scratch.gpr();
    m_jit.move(TrustedImmPtr(scratchBuffer->addressOfActiveLength()), scratchGPR);
    // Store through scratchGPR (implicit zero-offset address): writes null
    // to the active-length slot.
    m_jit.storePtr(TrustedImmPtr(nullptr), scratchGPR);
    noResult(node);
}
15100
// ProfileType: appends (value, structureID, typeLocation) to the VM's type
// profiler log. A predictive check against the last-seen type lets us skip
// the write entirely when the value's type has not changed. When the log
// fills up, a slow path call processes and resets it.
void SpeculativeJIT::compileProfileType(Node* node)
{
    JSValueOperand value(this, node->child1());
    GPRTemporary scratch1(this);
    GPRTemporary scratch2(this);
    GPRTemporary scratch3(this);

    JSValueRegs valueRegs = value.jsValueRegs();
    GPRReg scratch1GPR = scratch1.gpr();
    GPRReg scratch2GPR = scratch2.gpr();
    GPRReg scratch3GPR = scratch3.gpr();

    MacroAssembler::JumpList jumpToEnd;

    // Nothing to record for the empty value.
    jumpToEnd.append(m_jit.branchIfEmpty(valueRegs));

    TypeLocation* cachedTypeLocation = node->typeLocation();
    // Compile in a predictive type check, if possible, to see if we can skip writing to the log.
    // These typechecks are inlined to match those of the 64-bit JSValue type checks.
    if (cachedTypeLocation->m_lastSeenType == TypeUndefined)
        jumpToEnd.append(m_jit.branchIfUndefined(valueRegs));
    else if (cachedTypeLocation->m_lastSeenType == TypeNull)
        jumpToEnd.append(m_jit.branchIfNull(valueRegs));
    else if (cachedTypeLocation->m_lastSeenType == TypeBoolean)
        jumpToEnd.append(m_jit.branchIfBoolean(valueRegs, scratch1GPR));
    else if (cachedTypeLocation->m_lastSeenType == TypeAnyInt)
        jumpToEnd.append(m_jit.branchIfInt32(valueRegs));
    else if (cachedTypeLocation->m_lastSeenType == TypeNumber)
        jumpToEnd.append(m_jit.branchIfNumber(valueRegs, scratch1GPR));
    else if (cachedTypeLocation->m_lastSeenType == TypeString) {
        // Strings are cells, so guard on cell-ness before the string check.
        MacroAssembler::Jump isNotCell = m_jit.branchIfNotCell(valueRegs);
        jumpToEnd.append(m_jit.branchIfString(valueRegs.payloadGPR()));
        isNotCell.link(&m_jit);
    }

    // Load the TypeProfilerLog into Scratch2.
    TypeProfilerLog* cachedTypeProfilerLog = vm().typeProfilerLog();
    m_jit.move(TrustedImmPtr(cachedTypeProfilerLog), scratch2GPR);

    // Load the next LogEntry into Scratch1.
    m_jit.loadPtr(MacroAssembler::Address(scratch2GPR, TypeProfilerLog::currentLogEntryOffset()), scratch1GPR);

    // Store the JSValue onto the log entry.
    m_jit.storeValue(valueRegs, MacroAssembler::Address(scratch1GPR, TypeProfilerLog::LogEntry::valueOffset()));

    // Store the structureID of the cell if valueRegs is a cell, otherwise, store 0 on the log entry.
    MacroAssembler::Jump isNotCell = m_jit.branchIfNotCell(valueRegs);
    m_jit.load32(MacroAssembler::Address(valueRegs.payloadGPR(), JSCell::structureIDOffset()), scratch3GPR);
    m_jit.store32(scratch3GPR, MacroAssembler::Address(scratch1GPR, TypeProfilerLog::LogEntry::structureIDOffset()));
    MacroAssembler::Jump skipIsCell = m_jit.jump();
    isNotCell.link(&m_jit);
    m_jit.store32(TrustedImm32(0), MacroAssembler::Address(scratch1GPR, TypeProfilerLog::LogEntry::structureIDOffset()));
    skipIsCell.link(&m_jit);

    // Store the typeLocation on the log entry.
    m_jit.move(TrustedImmPtr(cachedTypeLocation), scratch3GPR);
    m_jit.storePtr(scratch3GPR, MacroAssembler::Address(scratch1GPR, TypeProfilerLog::LogEntry::locationOffset()));

    // Increment the current log entry. If the log is now full (cursor hit
    // the end pointer), call out to process and reset it.
    m_jit.addPtr(TrustedImm32(sizeof(TypeProfilerLog::LogEntry)), scratch1GPR);
    m_jit.storePtr(scratch1GPR, MacroAssembler::Address(scratch2GPR, TypeProfilerLog::currentLogEntryOffset()));
    MacroAssembler::Jump clearLog = m_jit.branchPtr(MacroAssembler::Equal, scratch1GPR, TrustedImmPtr(cachedTypeProfilerLog->logEndPtr()));
    addSlowPathGenerator(
        slowPathCall(clearLog, this, operationProcessTypeProfilerLogDFG, NoResult, TrustedImmPtr(&vm())));

    jumpToEnd.link(&m_jit);

    noResult(node);
}
15170
// Emits an inline-cached put-by-id: generates the IC fast path with
// JITPutByIdGenerator, then registers a slow path that calls the generator's
// slow-path function — through the StructureStubInfo when data ICs are in
// use, or as a direct call otherwise. An optional extra jump
// (slowPathTarget) is funneled into the same slow path. With DontSpill the
// caller has already flushed registers, so base/value/stubInfo need not be
// preserved across the IC.
void SpeculativeJIT::cachedPutById(CodeOrigin codeOrigin, GPRReg baseGPR, JSValueRegs valueRegs, GPRReg stubInfoGPR, GPRReg scratchGPR, CacheableIdentifier identifier, PutKind putKind, ECMAMode ecmaMode, JITCompiler::Jump slowPathTarget, SpillRegistersMode spillMode)
{
    RegisterSet usedRegisters = this->usedRegisters();
    if (spillMode == DontSpill) {
        // We've already flushed registers to the stack, we don't need to spill these.
        usedRegisters.set(baseGPR, false);
        usedRegisters.set(valueRegs, false);
        if (stubInfoGPR != InvalidGPRReg)
            usedRegisters.set(stubInfoGPR, false);
    }
    // Record the call site so exceptions from the IC slow path can unwind /
    // OSR exit correctly.
    CallSiteIndex callSite = m_jit.recordCallSiteAndGenerateExceptionHandlingOSRExitIfNeeded(codeOrigin, m_stream->size());
    JITPutByIdGenerator gen(
        m_jit.codeBlock(), JITType::DFGJIT, codeOrigin, callSite, usedRegisters, identifier,
        JSValueRegs::payloadOnly(baseGPR), valueRegs, stubInfoGPR,
        scratchGPR, ecmaMode, putKind);

    gen.generateFastPath(m_jit);

    JITCompiler::JumpList slowCases;
    if (slowPathTarget.isSet())
        slowCases.append(slowPathTarget);
    slowCases.append(gen.slowPathJump());

    std::unique_ptr<SlowPathGenerator> slowPath;
    if (JITCode::useDataIC(JITType::DFGJIT)) {
        // Data IC: the slow operation is fetched from the stub info at
        // runtime rather than linked statically.
        slowPath = slowPathICCall(
            slowCases, this, gen.stubInfo(), stubInfoGPR, CCallHelpers::Address(stubInfoGPR, StructureStubInfo::offsetOfSlowOperation()), gen.slowPathFunction(), NoResult, TrustedImmPtr::weakPointer(m_graph, m_graph.globalObjectFor(codeOrigin)), stubInfoGPR, valueRegs,
            CCallHelpers::CellValue(baseGPR), identifier.rawBits());
    } else {
        slowPath = slowPathCall(
            slowCases, this, gen.slowPathFunction(), NoResult, TrustedImmPtr::weakPointer(m_graph, m_graph.globalObjectFor(codeOrigin)), gen.stubInfo(), valueRegs,
            CCallHelpers::CellValue(baseGPR), identifier.rawBits());
    }

    // Register the IC with the compiler so it gets linked/repatched later.
    m_jit.addPutById(gen, slowPath.get());
    addSlowPathGenerator(WTFMove(slowPath));
}
15208
// Generic relational compare producing a boolean result. If either child is
// known not to be an int32, go straight to the C helper; otherwise compare
// int32 payloads inline, with a slow-path call for inputs that turn out not
// to be int32 at runtime.
void SpeculativeJIT::genericJSValueNonPeepholeCompare(Node* node, MacroAssembler::RelationalCondition cond, S_JITOperation_GJJ helperFunction)
{
    ASSERT(node->isBinaryUseKind(UntypedUse) || node->isBinaryUseKind(AnyBigIntUse) || node->isBinaryUseKind(HeapBigIntUse));
    JSValueOperand arg1(this, node->child1(), ManualOperandSpeculation);
    JSValueOperand arg2(this, node->child2(), ManualOperandSpeculation);
    speculate(node, node->child1());
    speculate(node, node->child2());

    JSValueRegs arg1Regs = arg1.jsValueRegs();
    JSValueRegs arg2Regs = arg2.jsValueRegs();

    JITCompiler::JumpList slowPath;

    if (isKnownNotInteger(node->child1().node()) || isKnownNotInteger(node->child2().node())) {
        // No int32 fast path is possible; flush and call the helper.
        GPRFlushedCallResult result(this);
        GPRReg resultGPR = result.gpr();

        arg1.use();
        arg2.use();

        flushRegisters();
        callOperation(helperFunction, resultGPR, TrustedImmPtr::weakPointer(m_graph, m_graph.globalObjectFor(node->origin.semantic)), arg1Regs, arg2Regs);
        m_jit.exceptionCheck();

        unblessedBooleanResult(resultGPR, node, UseChildrenCalledExplicitly);
        return;
    }

    GPRTemporary result(this, Reuse, arg1, TagWord);
    GPRReg resultGPR = result.gpr();

    arg1.use();
    arg2.use();

    // Inline int32 compare; bail to the helper if either value is not int32.
    if (!isKnownInteger(node->child1().node()))
        slowPath.append(m_jit.branchIfNotInt32(arg1Regs));
    if (!isKnownInteger(node->child2().node()))
        slowPath.append(m_jit.branchIfNotInt32(arg2Regs));

    m_jit.compare32(cond, arg1Regs.payloadGPR(), arg2Regs.payloadGPR(), resultGPR);

    if (!isKnownInteger(node->child1().node()) || !isKnownInteger(node->child2().node()))
        addSlowPathGenerator(slowPathCall(slowPath, this, helperFunction, resultGPR, TrustedImmPtr::weakPointer(m_graph, m_graph.globalObjectFor(node->origin.semantic)), arg1Regs, arg2Regs));

    unblessedBooleanResult(resultGPR, node, UseChildrenCalledExplicitly);
}
15255
// Generic compare fused with the Branch node that consumes it: instead of
// materializing a boolean, branch directly to the taken/notTaken blocks.
// Uses the same int32-fast-path / helper-call structure as the non-peephole
// variant.
void SpeculativeJIT::genericJSValuePeepholeBranch(Node* node, Node* branchNode, MacroAssembler::RelationalCondition cond, S_JITOperation_GJJ helperFunction)
{
    BasicBlock* taken = branchNode->branchData()->taken.block;
    BasicBlock* notTaken = branchNode->branchData()->notTaken.block;

    JITCompiler::ResultCondition callResultCondition = JITCompiler::NonZero;

    // The branch instruction will branch to the taken block.
    // If taken is next, switch taken with notTaken & invert the branch condition so we can fall through.
    if (taken == nextBlock()) {
        cond = JITCompiler::invert(cond);
        callResultCondition = JITCompiler::Zero;
        BasicBlock* tmp = taken;
        taken = notTaken;
        notTaken = tmp;
    }

    JSValueOperand arg1(this, node->child1(), ManualOperandSpeculation);
    JSValueOperand arg2(this, node->child2(), ManualOperandSpeculation);
    speculate(node, node->child1());
    speculate(node, node->child2());

    JSValueRegs arg1Regs = arg1.jsValueRegs();
    JSValueRegs arg2Regs = arg2.jsValueRegs();

    JITCompiler::JumpList slowPath;

    if (isKnownNotInteger(node->child1().node()) || isKnownNotInteger(node->child2().node())) {
        // No int32 fast path; call the helper and branch on its boolean
        // result.
        GPRFlushedCallResult result(this);
        GPRReg resultGPR = result.gpr();

        arg1.use();
        arg2.use();

        flushRegisters();
        callOperation(helperFunction, resultGPR, TrustedImmPtr::weakPointer(m_graph, m_graph.globalObjectFor(node->origin.semantic)), arg1Regs, arg2Regs);
        m_jit.exceptionCheck();

        branchTest32(callResultCondition, resultGPR, taken);
    } else {
        GPRTemporary result(this, Reuse, arg2, TagWord);
        GPRReg resultGPR = result.gpr();

        arg1.use();
        arg2.use();

        // Inline int32 compare; divert to the slow path if either operand
        // is not an int32 at runtime.
        if (!isKnownInteger(node->child1().node()))
            slowPath.append(m_jit.branchIfNotInt32(arg1Regs));
        if (!isKnownInteger(node->child2().node()))
            slowPath.append(m_jit.branchIfNotInt32(arg2Regs));

        branch32(cond, arg1Regs.payloadGPR(), arg2Regs.payloadGPR(), taken);

        if (!isKnownInteger(node->child1().node()) || !isKnownInteger(node->child2().node())) {
            // The slow path is emitted inline here (not via a slow path
            // generator), so jump over it on the fast path.
            jump(notTaken, ForceJump);

            slowPath.link(&m_jit);

            silentSpillAllRegisters(resultGPR);
            callOperation(helperFunction, resultGPR, TrustedImmPtr::weakPointer(m_graph, m_graph.globalObjectFor(node->origin.semantic)), arg1Regs, arg2Regs);
            silentFillAllRegisters();
            m_jit.exceptionCheck();

            branchTest32(callResultCondition, resultGPR, taken);
        }
    }

    jump(notTaken);

    // The peephole consumed the Branch node too, so advance the compiler's
    // cursor past it.
    m_indexInBlock = m_block->size() - 1;
    m_currentNode = branchNode;
}
15328
// Strict equality of two heap BigInt cells. Fast path: pointer identity
// implies equality. Otherwise call out to the generic strict-equal-on-cells
// helper, which compares the BigInt contents.
void SpeculativeJIT::compileHeapBigIntEquality(Node* node)
{
    // FIXME: [ESNext][BigInt] Create specialized version of strict equals for big ints
    // https://bugs.webkit.org/show_bug.cgi?id=182895
    SpeculateCellOperand left(this, node->child1());
    SpeculateCellOperand right(this, node->child2());
    GPRTemporary result(this, Reuse, left);
    GPRReg leftGPR = left.gpr();
    GPRReg rightGPR = right.gpr();
    GPRReg resultGPR = result.gpr();

    left.use();
    right.use();

    speculateHeapBigInt(node->child1(), leftGPR);
    speculateHeapBigInt(node->child2(), rightGPR);

    // Same cell => trivially equal.
    JITCompiler::Jump notEqualCase = m_jit.branchPtr(JITCompiler::NotEqual, leftGPR, rightGPR);

    m_jit.move(JITCompiler::TrustedImm32(1), resultGPR);

    JITCompiler::Jump done = m_jit.jump();

    notEqualCase.link(&m_jit);

    // Different cells: defer to the runtime for a content comparison.
    silentSpillAllRegisters(resultGPR);
    callOperation(operationCompareStrictEqCell, resultGPR, TrustedImmPtr::weakPointer(m_graph, m_graph.globalObjectFor(node->origin.semantic)), leftGPR, rightGPR);
    silentFillAllRegisters();

    done.link(&m_jit);

    unblessedBooleanResult(resultGPR, node, UseChildrenCalledExplicitly);
}
15362
// MakeRope: builds a JSRopeString from two or three string fibers. On
// ADDRESS64 targets the rope cell is allocated inline and the fiber
// pointers, is-8-bit flag, and total length are packed directly into the
// rope's three pointer-sized fiber fields; other targets just call the
// runtime. Throughout the inline path, scratchGPR accumulates the string
// flags (ANDed across fibers, so the rope is 8-bit only if all fibers are)
// and allocatorGPR accumulates the total length with overflow checks.
void SpeculativeJIT::compileMakeRope(Node* node)
{
    ASSERT(node->child1().useKind() == KnownStringUse);
    ASSERT(node->child2().useKind() == KnownStringUse);
    ASSERT(!node->child3() || node->child3().useKind() == KnownStringUse);

    SpeculateCellOperand op1(this, node->child1());
    SpeculateCellOperand op2(this, node->child2());
    SpeculateCellOperand op3(this, node->child3());
    GPRReg opGPRs[3];
    unsigned numOpGPRs;
    opGPRs[0] = op1.gpr();
    opGPRs[1] = op2.gpr();
    if (node->child3()) {
        opGPRs[2] = op3.gpr();
        numOpGPRs = 3;
    } else {
        opGPRs[2] = InvalidGPRReg;
        numOpGPRs = 2;
    }

#if CPU(ADDRESS64)
    Edge edges[3] = {
        node->child1(),
        node->child2(),
        node->child3()
    };

    GPRTemporary result(this);
    GPRTemporary allocator(this);
    GPRTemporary scratch(this);
    GPRTemporary scratch2(this);
    GPRReg resultGPR = result.gpr();
    GPRReg allocatorGPR = allocator.gpr();
    GPRReg scratchGPR = scratch.gpr();
    GPRReg scratch2GPR = scratch2.gpr();

    CCallHelpers::JumpList slowPath;
    Allocator allocatorValue = allocatorForNonVirtualConcurrently<JSRopeString>(vm(), sizeof(JSRopeString), AllocatorForMode::AllocatorIfExists);
    emitAllocateJSCell(resultGPR, JITAllocator::constant(allocatorValue), allocatorGPR, TrustedImmPtr(m_jit.graph().registerStructure(vm().stringStructure.get())), scratchGPR, slowPath);

    // This puts nullptr for the first fiber. It makes visitChildren safe even if this JSRopeString is discarded due to the speculation failure in the following path.
    m_jit.storePtr(TrustedImmPtr(JSString::isRopeInPointer), CCallHelpers::Address(resultGPR, JSRopeString::offsetOfFiber0()));

    {
        // First fiber: initialize scratchGPR (flags) and allocatorGPR
        // (length) rather than accumulate into them.
        if (JSString* string = edges[0]->dynamicCastConstant<JSString*>(vm())) {
            // Constant string: flags and length are known at compile time.
            m_jit.move(TrustedImm32(string->is8Bit() ? StringImpl::flagIs8Bit() : 0), scratchGPR);
            m_jit.move(TrustedImm32(string->length()), allocatorGPR);
        } else {
            // Ropes keep flags/length on the JSRopeString itself; resolved
            // strings keep them on the StringImpl.
            bool needsRopeCase = canBeRope(edges[0]);
            m_jit.loadPtr(CCallHelpers::Address(opGPRs[0], JSString::offsetOfValue()), scratch2GPR);
            CCallHelpers::Jump isRope;
            if (needsRopeCase)
                isRope = m_jit.branchIfRopeStringImpl(scratch2GPR);

            m_jit.load32(CCallHelpers::Address(scratch2GPR, StringImpl::flagsOffset()), scratchGPR);
            m_jit.load32(CCallHelpers::Address(scratch2GPR, StringImpl::lengthMemoryOffset()), allocatorGPR);

            if (needsRopeCase) {
                auto done = m_jit.jump();

                isRope.link(&m_jit);
                m_jit.load32(CCallHelpers::Address(opGPRs[0], JSRopeString::offsetOfFlags()), scratchGPR);
                m_jit.load32(CCallHelpers::Address(opGPRs[0], JSRopeString::offsetOfLength()), allocatorGPR);
                done.link(&m_jit);
            }
        }

        if (ASSERT_ENABLED) {
            CCallHelpers::Jump ok = m_jit.branch32(
                CCallHelpers::GreaterThanOrEqual, allocatorGPR, TrustedImm32(0));
            m_jit.abortWithReason(DFGNegativeStringLength);
            ok.link(&m_jit);
        }
    }

    // Remaining fibers: AND their flags into scratchGPR and add their
    // lengths into allocatorGPR, speculation-checking on overflow.
    for (unsigned i = 1; i < numOpGPRs; ++i) {
        if (JSString* string = edges[i]->dynamicCastConstant<JSString*>(vm())) {
            m_jit.and32(TrustedImm32(string->is8Bit() ? StringImpl::flagIs8Bit() : 0), scratchGPR);
            speculationCheck(
                Uncountable, JSValueSource(), nullptr,
                m_jit.branchAdd32(
                    CCallHelpers::Overflow,
                    TrustedImm32(string->length()), allocatorGPR));
        } else {
            bool needsRopeCase = canBeRope(edges[i]);
            m_jit.loadPtr(CCallHelpers::Address(opGPRs[i], JSString::offsetOfValue()), scratch2GPR);
            CCallHelpers::Jump isRope;
            if (needsRopeCase)
                isRope = m_jit.branchIfRopeStringImpl(scratch2GPR);

            m_jit.and32(CCallHelpers::Address(scratch2GPR, StringImpl::flagsOffset()), scratchGPR);
            speculationCheck(
                Uncountable, JSValueSource(), nullptr,
                m_jit.branchAdd32(
                    CCallHelpers::Overflow,
                    CCallHelpers::Address(scratch2GPR, StringImpl::lengthMemoryOffset()), allocatorGPR));
            if (needsRopeCase) {
                auto done = m_jit.jump();

                isRope.link(&m_jit);
                m_jit.and32(CCallHelpers::Address(opGPRs[i], JSRopeString::offsetOfFlags()), scratchGPR);
                m_jit.load32(CCallHelpers::Address(opGPRs[i], JSRopeString::offsetOfLength()), scratch2GPR);
                speculationCheck(
                    Uncountable, JSValueSource(), nullptr,
                    m_jit.branchAdd32(
                        CCallHelpers::Overflow, scratch2GPR, allocatorGPR));
                done.link(&m_jit);
            }
        }
    }

    if (ASSERT_ENABLED) {
        CCallHelpers::Jump ok = m_jit.branch32(
            CCallHelpers::GreaterThanOrEqual, allocatorGPR, TrustedImm32(0));
        m_jit.abortWithReason(DFGNegativeStringLength);
        ok.link(&m_jit);
    }

    // Fiber 0 field: first fiber pointer | is8Bit flag | isRope bit.
    static_assert(StringImpl::flagIs8Bit() == JSRopeString::is8BitInPointer, "");
    m_jit.and32(TrustedImm32(StringImpl::flagIs8Bit()), scratchGPR);
    m_jit.orPtr(opGPRs[0], scratchGPR);
    m_jit.orPtr(TrustedImmPtr(JSString::isRopeInPointer), scratchGPR);
    m_jit.storePtr(scratchGPR, CCallHelpers::Address(resultGPR, JSRopeString::offsetOfFiber0()));

    // Fiber 1 field: total length (low 32 bits, from allocatorGPR) packed
    // with the low 32 bits of the second fiber pointer (high 32 bits).
    m_jit.move(opGPRs[1], scratchGPR);
    m_jit.lshiftPtr(TrustedImm32(32), scratchGPR);
    m_jit.orPtr(allocatorGPR, scratchGPR);
    m_jit.storePtr(scratchGPR, CCallHelpers::Address(resultGPR, JSRopeString::offsetOfFiber1()));

    // Fiber 2 field: remaining high bits of the second fiber pointer, plus
    // (for three fibers) the third fiber pointer shifted left by 16 —
    // ADDRESS64 pointer-packing layout.
    if (numOpGPRs == 2) {
        m_jit.move(opGPRs[1], scratchGPR);
        m_jit.rshiftPtr(TrustedImm32(32), scratchGPR);
        m_jit.storePtr(scratchGPR, CCallHelpers::Address(resultGPR, JSRopeString::offsetOfFiber2()));
    } else {
        m_jit.move(opGPRs[1], scratchGPR);
        m_jit.rshiftPtr(TrustedImm32(32), scratchGPR);
        m_jit.move(opGPRs[2], scratch2GPR);
        m_jit.lshiftPtr(TrustedImm32(16), scratch2GPR);
        m_jit.orPtr(scratch2GPR, scratchGPR);
        m_jit.storePtr(scratchGPR, CCallHelpers::Address(resultGPR, JSRopeString::offsetOfFiber2()));
    }

    // A zero-length result must be the shared empty string, not a rope.
    auto isNonEmptyString = m_jit.branchTest32(CCallHelpers::NonZero, allocatorGPR);

    m_jit.move(TrustedImmPtr::weakPointer(m_jit.graph(), jsEmptyString(m_jit.graph().m_vm)), resultGPR);

    isNonEmptyString.link(&m_jit);
    // Fence the initializing stores before publishing the new cell.
    m_jit.mutatorFence(vm());

    // Allocation slow path: fall back to the runtime rope constructors.
    switch (numOpGPRs) {
    case 2:
        addSlowPathGenerator(slowPathCall(
            slowPath, this, operationMakeRope2, resultGPR, TrustedImmPtr::weakPointer(m_graph, m_graph.globalObjectFor(node->origin.semantic)), opGPRs[0], opGPRs[1]));
        break;
    case 3:
        addSlowPathGenerator(slowPathCall(
            slowPath, this, operationMakeRope3, resultGPR, TrustedImmPtr::weakPointer(m_graph, m_graph.globalObjectFor(node->origin.semantic)), opGPRs[0], opGPRs[1], opGPRs[2]));
        break;
    default:
        RELEASE_ASSERT_NOT_REACHED();
        break;
    }

    cellResult(resultGPR, node);
#else
    // Non-ADDRESS64: no inline allocation; always call the runtime.
    flushRegisters();
    GPRFlushedCallResult result(this);
    GPRReg resultGPR = result.gpr();
    switch (numOpGPRs) {
    case 2:
        callOperation(operationMakeRope2, resultGPR, TrustedImmPtr::weakPointer(m_graph, m_graph.globalObjectFor(node->origin.semantic)), opGPRs[0], opGPRs[1]);
        m_jit.exceptionCheck();
        break;
    case 3:
        callOperation(operationMakeRope3, resultGPR, TrustedImmPtr::weakPointer(m_graph, m_graph.globalObjectFor(node->origin.semantic)), opGPRs[0], opGPRs[1], opGPRs[2]);
        m_jit.exceptionCheck();
        break;
    default:
        RELEASE_ASSERT_NOT_REACHED();
        break;
    }

    cellResult(resultGPR, node);
#endif
}
15549
15550} } // namespace JSC::DFG
15551
15552#endif
15553